diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c591459f01bb7..ba3e3c1175b2c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -95,7 +95,7 @@ Contributing to the Elasticsearch codebase JDK 10 is required to build Elasticsearch. You must have a JDK 10 installation with the environment variable `JAVA_HOME` referencing the path to Java home for your JDK 10 installation. By default, tests use the same runtime as `JAVA_HOME`. -However, since Elasticsearch, supports JDK 8 the build supports compiling with +However, since Elasticsearch supports JDK 8, the build supports compiling with JDK 10 and testing on a JDK 8 runtime; to do this, set `RUNTIME_JAVA_HOME` pointing to the Java home of a JDK 8 installation. Note that this mechanism can be used to test against other JDKs as well; it is not limited only to JDK 8. @@ -325,21 +325,19 @@ common configurations in our build and how we use them:
`compile`
Code that is on the classpath at both compile and -runtime. If the [`shadow`][shadow-plugin] plugin is applied to the project then -this code is bundled into the jar produced by the project.
+runtime.
`runtime`
Code that is not on the classpath at compile time but is on the classpath at runtime. We mostly use this configuration to make sure that we do not accidentally compile against dependencies of our dependencies, also known as "transitive" dependencies.
-
`compileOnly`
Code that is on the classpath at comile time but that +
`compileOnly`
Code that is on the classpath at compile time but that should not be shipped with the project because it is "provided" by the runtime somehow. Elasticsearch plugins use this configuration to include dependencies that are bundled with Elasticsearch's server.
-
`shadow`
Only available in projects with the shadow plugin. Code -that is on the classpath at both compile and runtime but it *not* bundled into -the jar produced by the project. If you depend on a project with the `shadow` -plugin then you need to depend on this configuration because it will bring -along all of the dependencies you need at runtime.
+
`bundle`
Only available in projects with the shadow plugin. +Dependencies in this configuration are bundled into the jar produced by the +build. Since IDEs do not understand this configuration, we rig them to treat +dependencies in this configuration as `compile` dependencies.
`testCompile`
Code that is on the classpath for compiling tests that are part of this project but not production code. The canonical example of this is `junit`.
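As a rough sketch of how these configurations combine in a single `build.gradle` (the coordinates and the `:libs:some-shared-lib` project path are made up for illustration, and `bundle` only exists once the shadow plugin has been applied):

```groovy
dependencies {
  // on the classpath at both compile time and runtime
  compile "org.apache.lucene:lucene-core:${versions.lucene}"
  // provided by the Elasticsearch server at runtime, so never shipped with the plugin
  compileOnly "org.elasticsearch:elasticsearch:${versions.elasticsearch}"
  // only with the shadow plugin: shaded into the jar this project produces
  bundle project(':libs:some-shared-lib')
  // available only when compiling and running tests
  testCompile "junit:junit:${versions.junit}"
}
```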
diff --git a/build.gradle b/build.gradle index 3674e0a540bf8..f8282ca5ae89e 100644 --- a/build.gradle +++ b/build.gradle @@ -16,21 +16,19 @@ * specific language governing permissions and limitations * under the License. */ + import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin +import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionCollection import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.gradle.plugins.ide.eclipse.model.SourceFolder -import org.gradle.util.GradleVersion -import org.gradle.util.DistributionLocator -import org.apache.tools.ant.taskdefs.condition.Os -import org.apache.tools.ant.filters.ReplaceTokens import java.nio.file.Files import java.nio.file.Path -import java.security.MessageDigest plugins { id 'com.gradle.build-scan' version '1.13.2' @@ -87,8 +85,15 @@ subprojects { } } } + repositories { + maven { + name = 'localTest' + url = "${rootProject.buildDir}/local-test-repo" + } + } } } + plugins.withType(BuildPlugin).whenPluginAdded { project.licenseFile = project.rootProject.file('licenses/APACHE-LICENSE-2.0.txt') project.noticeFile = project.rootProject.file('NOTICE.txt') @@ -228,6 +233,7 @@ subprojects { "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}": ':client:rest-high-level', "org.elasticsearch.client:test:${version}": ':client:test', "org.elasticsearch.client:transport:${version}": ':client:transport', + "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${version}": ':modules:lang-painless:spi', "org.elasticsearch.test:framework:${version}": ':test:framework', "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:archives:integ-test-zip', "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:archives:zip', @@ -296,7 +302,7 @@ subprojects { // org.elasticsearch:elasticsearch must be the last one or all the links for the // other packages (e.g org.elasticsearch.client) will point to server rather than // their own artifacts. - if (project.plugins.hasPlugin(BuildPlugin)) { + if (project.plugins.hasPlugin(BuildPlugin) || project.plugins.hasPlugin(PluginBuildPlugin)) { String artifactsHost = VersionProperties.elasticsearch.isSnapshot() ? "https://snapshots.elastic.co" : "https://artifacts.elastic.co" Closure sortClosure = { a, b -> b.group <=> a.group } Closure depJavadocClosure = { shadowed, dep -> @@ -314,13 +320,6 @@ subprojects { */ project.evaluationDependsOn(upstreamProject.path) project.javadoc.source += upstreamProject.javadoc.source - /* - * Do not add those projects to the javadoc classpath because - * we are going to resolve them with their source instead. - */ - project.javadoc.classpath = project.javadoc.classpath.filter { f -> - false == upstreamProject.configurations.archives.artifacts.files.files.contains(f) - } /* * Instead we need the upstream project's javadoc classpath so * we don't barf on the classes that it references. 
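For orientation: the `localTest` repository added above is a file-backed Maven repository under the root project's build directory, and Gradle's `maven-publish` plugin derives publish task names from the publication and repository names. That is where the `publishNebulaPublicationToLocalTestRepository` task, which the buildSrc integration tests depend on further down in this change, comes from. A minimal, self-contained sketch of the naming convention (not the exact wiring used by this build):

```groovy
plugins {
  id 'java'
  id 'maven-publish'
}

publishing {
  // a publication named "nebula", mirroring the nebula publishing plugins used by the build
  publications {
    nebula(MavenPublication) {
      from components.java
    }
  }
  // a repository named "localTest" pointing at the local test repo
  repositories {
    maven {
      name = 'localTest'
      url = "${rootProject.buildDir}/local-test-repo"
    }
  }
}

// maven-publish generates: publishNebulaPublicationToLocalTestRepository
```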
@@ -337,16 +336,16 @@ subprojects { project.configurations.compile.dependencies .findAll() .toSorted(sortClosure) - .each({ c -> depJavadocClosure(hasShadow, c) }) + .each({ c -> depJavadocClosure(false, c) }) project.configurations.compileOnly.dependencies .findAll() .toSorted(sortClosure) - .each({ c -> depJavadocClosure(hasShadow, c) }) + .each({ c -> depJavadocClosure(false, c) }) if (hasShadow) { - project.configurations.shadow.dependencies + project.configurations.bundle.dependencies .findAll() .toSorted(sortClosure) - .each({ c -> depJavadocClosure(false, c) }) + .each({ c -> depJavadocClosure(true, c) }) } } } @@ -510,30 +509,33 @@ allprojects { tasks.cleanEclipse.dependsOn(wipeEclipseSettings) // otherwise the eclipse merging is *super confusing* tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings) + + // work around https://github.com/gradle/gradle/issues/6582 + tasks.eclipseProject.mustRunAfter tasks.cleanEclipseProject + tasks.matching { it.name == 'eclipseClasspath' }.all { + it.mustRunAfter { tasks.cleanEclipseClasspath } + } + tasks.matching { it.name == 'eclipseJdt' }.all { + it.mustRunAfter { tasks.cleanEclipseJdt } + } + tasks.copyEclipseSettings.mustRunAfter tasks.wipeEclipseSettings } allprojects { /* * IntelliJ and Eclipse don't know about the shadow plugin so when we're - * in "IntelliJ mode" or "Eclipse mode" add "runtime" dependencies - * eveywhere where we see a "shadow" dependency which will cause them to - * reference shadowed projects directly rather than rely on the shadowing - * to include them. This is the correct thing for it to do because it - * doesn't run the jar shadowing at all. This isn't needed for the project + * in "IntelliJ mode" or "Eclipse mode" switch "bundle" dependencies into + * regular "compile" dependencies. This isn't needed for the project * itself because the IDE configuration is done by SourceSets but it * *is* needed for projects that depend on the project doing the shadowing. * Without this they won't properly depend on the shadowed project. */ if (isEclipse || isIdea) { - configurations.all { Configuration configuration -> - dependencies.all { Dependency dep -> - if (dep instanceof ProjectDependency) { - if (dep.getTargetConfiguration() == 'shadow') { - configuration.dependencies.add(project.dependencies.project(path: dep.dependencyProject.path, configuration: 'runtime')) - } - } - } - } + project.plugins.withType(ShadowPlugin).whenPluginAdded { + project.afterEvaluate { + project.configurations.compile.extendsFrom project.configurations.bundle + } + } } } diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 5775b2b6323f1..da8ad788164d2 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -17,6 +17,7 @@ * under the License. */ import java.nio.file.Files +import org.gradle.util.GradleVersion plugins { id 'java-gradle-plugin' @@ -102,7 +103,6 @@ dependencies { compile 'com.netflix.nebula:gradle-info-plugin:3.0.3' compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r' compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
- compile 'de.thetaphi:forbiddenapis:2.5' compile 'org.apache.rat:apache-rat:0.11' compile "org.elasticsearch:jna:4.5.1" compile 'com.github.jengelman.gradle.plugins:shadow:2.0.4' @@ -162,11 +162,23 @@ if (project != rootProject) { // it's fine as we run them as part of :buildSrc test.enabled = false task integTest(type: Test) { + // integration test requires the local testing repo for example plugin builds + dependsOn project.rootProject.allprojects.collect { + it.tasks.matching { it.name == 'publishNebulaPublicationToLocalTestRepository'} + } exclude "**/*Tests.class" - include "**/*IT.class" testClassesDirs = sourceSets.test.output.classesDirs classpath = sourceSets.test.runtimeClasspath inputs.dir(file("src/testKit")) + // tell BuildExamplePluginsIT where to find the example plugins + systemProperty ( + 'test.build-tools.plugin.examples', + files( + project(':example-plugins').subprojects.collect { it.projectDir } + ).asPath, + ) + systemProperty 'test.local-test-repo-path', "${rootProject.buildDir}/local-test-repo" + systemProperty 'test.lucene-snapshot-revision', (versions.lucene =~ /\w+-snapshot-([a-z0-9]+)/)[0][1] } check.dependsOn(integTest) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 306a2bcb58bd1..75b5676cc34a0 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -38,7 +38,6 @@ import org.gradle.api.artifacts.ModuleDependency import org.gradle.api.artifacts.ModuleVersionIdentifier import org.gradle.api.artifacts.ProjectDependency import org.gradle.api.artifacts.ResolvedArtifact -import org.gradle.api.artifacts.SelfResolvingDependency import org.gradle.api.artifacts.dsl.RepositoryHandler import org.gradle.api.execution.TaskExecutionGraph import org.gradle.api.plugins.JavaPlugin @@ -79,8 +78,9 @@ class BuildPlugin implements Plugin { } project.pluginManager.apply('java') project.pluginManager.apply('carrotsearch.randomized-testing') - // these plugins add lots of info to our jars + configureConfigurations(project) configureJars(project) // jar config must be added before info broker + // these plugins add lots of info to our jars project.pluginManager.apply('nebula.info-broker') project.pluginManager.apply('nebula.info-basic') project.pluginManager.apply('nebula.info-java') @@ -91,8 +91,8 @@ class BuildPlugin implements Plugin { globalBuildInfo(project) configureRepositories(project) - configureConfigurations(project) project.ext.versions = VersionProperties.versions + configureSourceSets(project) configureCompile(project) configureJavadoc(project) configureSourcesJar(project) @@ -211,6 +211,7 @@ class BuildPlugin implements Plugin { project.rootProject.ext.minimumRuntimeVersion = minimumRuntimeVersion project.rootProject.ext.inFipsJvm = inFipsJvm project.rootProject.ext.gradleJavaVersion = JavaVersion.toVersion(gradleJavaVersion) + project.rootProject.ext.java9Home = "${-> findJavaHome("9")}" } project.targetCompatibility = project.rootProject.ext.minimumRuntimeVersion @@ -224,6 +225,7 @@ class BuildPlugin implements Plugin { project.ext.javaVersions = project.rootProject.ext.javaVersions project.ext.inFipsJvm = project.rootProject.ext.inFipsJvm project.ext.gradleJavaVersion = project.rootProject.ext.gradleJavaVersion + project.ext.java9Home = project.rootProject.ext.java9Home } private static String getPaddedMajorVersion(JavaVersion compilerJavaVersionEnum) { 
@@ -421,8 +423,10 @@ class BuildPlugin implements Plugin { project.configurations.compile.dependencies.all(disableTransitiveDeps) project.configurations.testCompile.dependencies.all(disableTransitiveDeps) project.configurations.compileOnly.dependencies.all(disableTransitiveDeps) + project.plugins.withType(ShadowPlugin).whenPluginAdded { - project.configurations.shadow.dependencies.all(disableTransitiveDeps) + Configuration bundle = project.configurations.create('bundle') + bundle.dependencies.all(disableTransitiveDeps) } } @@ -528,11 +532,16 @@ class BuildPlugin implements Plugin { project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask -> // The GenerateMavenPom task is aggressive about setting the destination, instead of fighting it, // just make a copy. + generatePOMTask.ext.pomFileName = null doLast { project.copy { from generatePOMTask.destination into "${project.buildDir}/distributions" - rename { "${project.archivesBaseName}-${project.version}.pom" } + rename { + generatePOMTask.ext.pomFileName == null ? + "${project.archivesBaseName}-${project.version}.pom" : + generatePOMTask.ext.pomFileName + } } } // build poms with assemble (if the assemble task exists) @@ -554,37 +563,26 @@ class BuildPlugin implements Plugin { project.publishing { publications { nebula(MavenPublication) { - artifact project.tasks.shadowJar - artifactId = project.archivesBaseName - /* - * Configure the pom to include the "shadow" as compile dependencies - * because that is how we're using them but remove all other dependencies - * because they've been shaded into the jar. - */ - pom.withXml { XmlProvider xml -> - Node root = xml.asNode() - root.remove(root.dependencies) - Node dependenciesNode = root.appendNode('dependencies') - project.configurations.shadow.allDependencies.each { - if (false == it instanceof SelfResolvingDependency) { - Node dependencyNode = dependenciesNode.appendNode('dependency') - dependencyNode.appendNode('groupId', it.group) - dependencyNode.appendNode('artifactId', it.name) - dependencyNode.appendNode('version', it.version) - dependencyNode.appendNode('scope', 'compile') - } - } - // Be tidy and remove the element if it is empty - if (dependenciesNode.children.empty) { - root.remove(dependenciesNode) - } - } + artifacts = [ project.tasks.shadowJar ] } } } } } + } + /** + * Add dependencies that we are going to bundle to the compile classpath. + */ + static void configureSourceSets(Project project) { + project.plugins.withType(ShadowPlugin).whenPluginAdded { + ['main', 'test'].each {name -> + SourceSet sourceSet = project.sourceSets.findByName(name) + if (sourceSet != null) { + sourceSet.compileClasspath += project.configurations.bundle + } + } + } } /** Adds compiler settings to the project */ @@ -604,7 +602,6 @@ class BuildPlugin implements Plugin { } else { options.fork = true options.forkOptions.javaHome = compilerJavaHomeFile - options.forkOptions.memoryMaximumSize = "512m" } if (targetCompatibilityVersion == JavaVersion.VERSION_1_8) { // compile with compact 3 profile by default @@ -764,9 +761,16 @@ class BuildPlugin implements Plugin { * better to be safe */ mergeServiceFiles() + /* + * Bundle dependencies of the "bundled" configuration. 
+ */ + configurations = [project.configurations.bundle] } // Make sure we assemble the shadow jar project.tasks.assemble.dependsOn project.tasks.shadowJar + project.artifacts { + apiElements project.tasks.shadowJar + } } } @@ -871,13 +875,8 @@ class BuildPlugin implements Plugin { exclude '**/*$*.class' project.plugins.withType(ShadowPlugin).whenPluginAdded { - /* - * If we make a shaded jar we test against it. - */ + // Test against a shadow jar if we made one classpath -= project.tasks.compileJava.outputs.files - classpath -= project.configurations.compile - classpath -= project.configurations.runtime - classpath += project.configurations.shadow classpath += project.tasks.shadowJar.outputs.files dependsOn project.tasks.shadowJar } @@ -903,26 +902,6 @@ class BuildPlugin implements Plugin { additionalTest.dependsOn(project.tasks.testClasses) project.check.dependsOn(additionalTest) }); - - project.plugins.withType(ShadowPlugin).whenPluginAdded { - /* - * We need somewhere to configure dependencies that we don't wish - * to shade into the jar. The shadow plugin creates a "shadow" - * configuration which is *almost* exactly that. It is never - * bundled into the shaded jar but is used for main source - * compilation. Unfortunately, by default it is not used for - * *test* source compilation and isn't used in tests at all. This - * change makes it available for test compilation. - * - * Note that this isn't going to work properly with qa projects - * but they have no business applying the shadow plugin in the - * firstplace. - */ - SourceSet testSourceSet = project.sourceSets.findByName('test') - if (testSourceSet != null) { - testSourceSet.compileClasspath += project.configurations.shadow - } - } } private static configurePrecommit(Project project) { @@ -934,7 +913,7 @@ class BuildPlugin implements Plugin { it.group.startsWith('org.elasticsearch') == false } - project.configurations.compileOnly project.plugins.withType(ShadowPlugin).whenPluginAdded { - project.dependencyLicenses.dependencies += project.configurations.shadow.fileCollection { + project.dependencyLicenses.dependencies += project.configurations.bundle.fileCollection { it.group.startsWith('org.elasticsearch') == false } } @@ -945,7 +924,7 @@ class BuildPlugin implements Plugin { deps.runtimeConfiguration = project.configurations.runtime project.plugins.withType(ShadowPlugin).whenPluginAdded { deps.runtimeConfiguration = project.configurations.create('infoDeps') - deps.runtimeConfiguration.extendsFrom(project.configurations.runtime, project.configurations.shadow) + deps.runtimeConfiguration.extendsFrom(project.configurations.runtime, project.configurations.bundle) } deps.compileOnlyConfiguration = project.configurations.compileOnly project.afterEvaluate { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy index 7d5b793254fe4..daab0efc8c69a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy @@ -138,9 +138,8 @@ class VersionCollection { break } } - // caveat 0 - now dip back 2 versions to get the last supported snapshot version of the line - Version highestMinor = getHighestPreviousMinor(currentVersion.major - 1) - maintenanceBugfixSnapshot = replaceAsSnapshot(highestMinor) + // caveat 0 - the last supported snapshot of the line is on a version that we don't support (N-2) + maintenanceBugfixSnapshot = null 
} else { // caveat 3 did not apply. version is not a X.0.0, so we are somewhere on a X.Y line // only check till minor == 0 of the major @@ -293,7 +292,8 @@ class VersionCollection { * If you have a list [5.0.2, 5.1.2, 6.0.1, 6.1.1] and pass in 6 for the nextMajorVersion, it will return you 5.1.2 */ private Version getHighestPreviousMinor(Integer nextMajorVersion) { - return versionSet.headSet(Version.fromString("${nextMajorVersion}.0.0")).last() + SortedSet result = versionSet.headSet(Version.fromString("${nextMajorVersion}.0.0")) + return result.isEmpty() ? null : result.last() } /** diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 00f178fda9c9f..a14a3a680da1c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -19,27 +19,21 @@ package org.elasticsearch.gradle.plugin import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin -import nebula.plugin.info.scm.ScmInfoPlugin +import nebula.plugin.publishing.maven.MavenScmPlugin import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.NoticeTask import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.test.RunTask -import org.gradle.api.InvalidUserDataException -import org.gradle.api.JavaVersion import org.gradle.api.Project -import org.gradle.api.Task -import org.gradle.api.XmlProvider import org.gradle.api.publish.maven.MavenPublication import org.gradle.api.publish.maven.plugins.MavenPublishPlugin +import org.gradle.api.publish.maven.tasks.GenerateMavenPom import org.gradle.api.tasks.SourceSet import org.gradle.api.tasks.bundling.Zip +import org.gradle.jvm.tasks.Jar -import java.nio.file.Files -import java.nio.file.Path -import java.nio.file.StandardCopyOption import java.util.regex.Matcher import java.util.regex.Pattern - /** * Encapsulates build configuration for an Elasticsearch plugin. 
*/ @@ -57,16 +51,10 @@ public class PluginBuildPlugin extends BuildPlugin { String name = project.pluginProperties.extension.name project.archivesBaseName = name - if (project.pluginProperties.extension.hasClientJar) { - // for plugins which work with the transport client, we copy the jar - // file to a new name, copy the nebula generated pom to the same name, - // and generate a different pom for the zip - addClientJarPomGeneration(project) - addClientJarTask(project) - } - // while the jar isn't normally published, we still at least build a pom of deps - // in case it is published, for instance when other plugins extend this plugin - configureJarPom(project) + // set the project description so it will be picked up by publishing + project.description = project.pluginProperties.extension.description + + configurePublishing(project) project.integTestCluster.dependsOn(project.bundlePlugin) project.tasks.run.dependsOn(project.bundlePlugin) @@ -96,6 +84,32 @@ public class PluginBuildPlugin extends BuildPlugin { project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build } + private void configurePublishing(Project project) { + // Only configure publishing if applied externally + if (project.pluginProperties.extension.hasClientJar) { + project.plugins.apply(MavenScmPlugin.class) + // Only change Jar tasks; we don't want a -client zip so we can't change archivesBaseName + project.tasks.withType(Jar) { + baseName = baseName + "-client" + } + // always configure publishing for client jars + project.plugins.apply(MavenScmPlugin.class) + project.publishing.publications.nebula(MavenPublication).artifactId( + project.pluginProperties.extension.name + "-client" + ) + project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask -> + generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-client-${project.version}.pom" + } + } else { + project.plugins.withType(MavenPublishPlugin).whenPluginAdded { + project.publishing.publications.nebula(MavenPublication).artifactId( + project.pluginProperties.extension.name + ) + } + + } + } + private static void configureDependencies(Project project) { project.dependencies { compileOnly "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}" @@ -143,11 +157,10 @@ public class PluginBuildPlugin extends BuildPlugin { from pluginMetadata // metadata (eg custom security policy) /* * If the plugin is using the shadow plugin then we need to bundle - * "shadow" things rather than the default jar and dependencies so - * we don't hit jar hell. + * that shadow jar. */ from { project.plugins.hasPlugin(ShadowPlugin) ? project.shadowJar : project.jar } - from { project.plugins.hasPlugin(ShadowPlugin) ? project.configurations.shadow : project.configurations.runtime - project.configurations.compileOnly } + from project.configurations.runtime - project.configurations.compileOnly // extra files for the plugin to go into the zip from('src/main/packaging') // TODO: move all config/bin/_size/etc into packaging from('src/main') { @@ -163,33 +176,6 @@ public class PluginBuildPlugin extends BuildPlugin { } /** Adds a task to move jar and associated files to a "-client" name.
*/ - protected static void addClientJarTask(Project project) { - Task clientJar = project.tasks.create('clientJar') - clientJar.dependsOn(project.jar, project.tasks.generatePomFileForClientJarPublication, project.javadocJar, project.sourcesJar) - clientJar.doFirst { - Path jarFile = project.jar.outputs.files.singleFile.toPath() - String clientFileName = jarFile.fileName.toString().replace(project.version, "client-${project.version}") - Files.copy(jarFile, jarFile.resolveSibling(clientFileName), StandardCopyOption.REPLACE_EXISTING) - - String clientPomFileName = clientFileName.replace('.jar', '.pom') - Files.copy( - project.tasks.generatePomFileForClientJarPublication.outputs.files.singleFile.toPath(), - jarFile.resolveSibling(clientPomFileName), - StandardCopyOption.REPLACE_EXISTING - ) - - String sourcesFileName = jarFile.fileName.toString().replace('.jar', '-sources.jar') - String clientSourcesFileName = clientFileName.replace('.jar', '-sources.jar') - Files.copy(jarFile.resolveSibling(sourcesFileName), jarFile.resolveSibling(clientSourcesFileName), - StandardCopyOption.REPLACE_EXISTING) - - String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar') - String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar') - Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName), - StandardCopyOption.REPLACE_EXISTING) - } - project.assemble.dependsOn(clientJar) - } static final Pattern GIT_PATTERN = Pattern.compile(/git@([^:]+):([^\.]+)\.git/) @@ -211,39 +197,11 @@ public class PluginBuildPlugin extends BuildPlugin { /** Adds nebula publishing task to generate a pom file for the plugin. */ protected static void addClientJarPomGeneration(Project project) { - project.plugins.apply(MavenPublishPlugin.class) - - project.publishing { - publications { - clientJar(MavenPublication) { - from project.components.java - artifactId = project.pluginProperties.extension.name + '-client' - pom.withXml { XmlProvider xml -> - Node root = xml.asNode() - root.appendNode('name', project.pluginProperties.extension.name) - root.appendNode('description', project.pluginProperties.extension.description) - root.appendNode('url', urlFromOrigin(project.scminfo.origin)) - Node scmNode = root.appendNode('scm') - scmNode.appendNode('url', project.scminfo.origin) - } - } - } - } + project.plugins.apply(MavenScmPlugin.class) + project.description = project.pluginProperties.extension.description } /** Configure the pom for the main jar of this plugin */ - protected static void configureJarPom(Project project) { - project.plugins.apply(ScmInfoPlugin.class) - project.plugins.apply(MavenPublishPlugin.class) - - project.publishing { - publications { - nebula(MavenPublication) { - artifactId project.pluginProperties.extension.name - } - } - } - } protected void addNoticeGeneration(Project project) { File licenseFile = project.pluginProperties.extension.licenseFile diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy index 6cfe44c806833..c250d7695a832 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy @@ -20,6 +20,7 @@ package org.elasticsearch.gradle.plugin import org.gradle.api.Project import org.gradle.api.tasks.Input +import org.gradle.api.tasks.InputFile /** * A 
container for plugin properties that will be written to the plugin descriptor, for easy @@ -55,18 +56,39 @@ class PluginPropertiesExtension { boolean requiresKeystore = false /** A license file that should be included in the built plugin zip. */ - @Input - File licenseFile = null + private File licenseFile = null /** * A notice file that should be included in the built plugin zip. This will be * extended with notices from the {@code licenses/} directory. */ - @Input - File noticeFile = null + private File noticeFile = null + + Project project = null PluginPropertiesExtension(Project project) { name = project.name version = project.version + this.project = project + } + + @InputFile + File getLicenseFile() { + return licenseFile + } + + void setLicenseFile(File licenseFile) { + project.ext.licenseFile = licenseFile + this.licenseFile = licenseFile + } + + @InputFile + File getNoticeFile() { + return noticeFile + } + + void setNoticeFile(File noticeFile) { + project.ext.noticeFile = noticeFile + this.noticeFile = noticeFile } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy index 8e913153f05ad..9588f77a71db7 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy @@ -23,7 +23,6 @@ import org.gradle.api.InvalidUserDataException import org.gradle.api.Task import org.gradle.api.tasks.Copy import org.gradle.api.tasks.OutputFile - /** * Creates a plugin descriptor. */ diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy index 656d5e0d35a9e..119a02764994c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy @@ -19,10 +19,11 @@ package org.elasticsearch.gradle.precommit +import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import org.elasticsearch.gradle.LoggedExec import org.gradle.api.file.FileCollection +import org.gradle.api.tasks.Classpath import org.gradle.api.tasks.OutputFile - /** * Runs CheckJarHell on a classpath. */ @@ -34,11 +35,18 @@ public class JarHellTask extends LoggedExec { * inputs (ie the jars/class files). 
*/ @OutputFile - File successMarker = new File(project.buildDir, 'markers/jarHell') + File successMarker + + @Classpath + FileCollection classpath public JarHellTask() { + successMarker = new File(project.buildDir, 'markers/jarHell-' + getName()) project.afterEvaluate { FileCollection classpath = project.sourceSets.test.runtimeClasspath + if (project.plugins.hasPlugin(ShadowPlugin)) { + classpath += project.configurations.bundle + } inputs.files(classpath) dependsOn(classpath) description = "Runs CheckJarHell on ${classpath}" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 598d82d5d9aa7..06557d4ccfdb7 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -18,17 +18,12 @@ */ package org.elasticsearch.gradle.precommit -import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis -import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask import org.gradle.api.Project import org.gradle.api.Task -import org.gradle.api.file.FileCollection +import org.gradle.api.artifacts.Configuration import org.gradle.api.plugins.JavaBasePlugin import org.gradle.api.plugins.quality.Checkstyle -import org.gradle.api.tasks.JavaExec -import org.gradle.api.tasks.StopExecutionException - /** * Validation tasks which should be run before committing. These run before tests. */ @@ -36,20 +31,22 @@ class PrecommitTasks { /** Adds a precommit task, which depends on non-test verification tasks. */ public static Task create(Project project, boolean includeDependencyLicenses) { + project.configurations.create("forbiddenApisCliJar") + project.dependencies { + forbiddenApisCliJar ('de.thetaphi:forbiddenapis:2.5') + } + List precommitTasks = [ - configureForbiddenApis(project), configureCheckstyle(project), + configureForbiddenApisCli(project), configureNamingConventions(project), project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), project.tasks.create('licenseHeaders', LicenseHeadersTask.class), project.tasks.create('filepermissions', FilePermissionsTask.class), - project.tasks.create('jarHell', JarHellTask.class), - project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class) + configureJarHell(project), + configureThirdPartyAudit(project) ] - // Configure it but don't add it as a dependency yet - configureForbiddenApisCli(project) - // tasks with just tests don't need dependency licenses, so this flag makes adding // the task optional if (includeDependencyLicenses) { @@ -83,72 +80,61 @@ class PrecommitTasks { return project.tasks.create(precommitOptions) } - private static Task configureForbiddenApis(Project project) { - project.pluginManager.apply(ForbiddenApisPlugin.class) - project.forbiddenApis { - failOnUnsupportedJava = false - bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-non-portable', 'jdk-system-out'] - signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'), - getClass().getResource('/forbidden/es-all-signatures.txt')] - suppressAnnotations = ['**.SuppressForbidden'] - } - project.tasks.withType(CheckForbiddenApis) { - // we do not use the += operator to add signatures, as conventionMappings of Gradle do not work when it's configured using withType: - if (name.endsWith('Test')) { - signaturesURLs = 
project.forbiddenApis.signaturesURLs + - [ getClass().getResource('/forbidden/es-test-signatures.txt'), getClass().getResource('/forbidden/http-signatures.txt') ] - } else { - signaturesURLs = project.forbiddenApis.signaturesURLs + - [ getClass().getResource('/forbidden/es-server-signatures.txt') ] - } - } - Task forbiddenApis = project.tasks.findByName('forbiddenApis') - forbiddenApis.group = "" // clear group, so this does not show up under verification tasks - - return forbiddenApis + private static Task configureJarHell(Project project) { + Task task = project.tasks.create('jarHell', JarHellTask.class) + task.classpath = project.sourceSets.test.runtimeClasspath + return task } - private static Task configureForbiddenApisCli(Project project) { - project.configurations.create("forbiddenApisCliJar") - project.dependencies { - forbiddenApisCliJar 'de.thetaphi:forbiddenapis:2.5' + private static Task configureThirdPartyAudit(Project project) { + ThirdPartyAuditTask thirdPartyAuditTask = project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class) + ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources') + thirdPartyAuditTask.configure { + dependsOn(buildResources) + signatureFile = buildResources.copy("forbidden/third-party-audit.txt") + javaHome = project.runtimeJavaHome + targetCompatibility = project.runtimeJavaVersion } - Task forbiddenApisCli = project.tasks.create('forbiddenApisCli') + return thirdPartyAuditTask + } - project.sourceSets.forEach { sourceSet -> + private static Task configureForbiddenApisCli(Project project) { + Task forbiddenApisCli = project.tasks.create('forbiddenApis') + project.sourceSets.all { sourceSet -> forbiddenApisCli.dependsOn( - project.tasks.create(sourceSet.getTaskName('forbiddenApisCli', null), JavaExec) { + project.tasks.create(sourceSet.getTaskName('forbiddenApis', null), ForbiddenApisCliTask) { ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources') dependsOn(buildResources) - classpath = project.files( - project.configurations.forbiddenApisCliJar, - sourceSet.compileClasspath, - sourceSet.runtimeClasspath + it.sourceSet = sourceSet + javaHome = project.runtimeJavaHome + targetCompatibility = project.compilerJavaVersion + bundledSignatures = [ + "jdk-unsafe", "jdk-deprecated", "jdk-non-portable", "jdk-system-out" + ] + signaturesFiles = project.files( + buildResources.copy("forbidden/jdk-signatures.txt"), + buildResources.copy("forbidden/es-all-signatures.txt") ) - main = 'de.thetaphi.forbiddenapis.cli.CliMain' - executable = "${project.runtimeJavaHome}/bin/java" - args "-b", 'jdk-unsafe-1.8' - args "-b", 'jdk-deprecated-1.8' - args "-b", 'jdk-non-portable' - args "-b", 'jdk-system-out' - args "-f", buildResources.copy("forbidden/jdk-signatures.txt") - args "-f", buildResources.copy("forbidden/es-all-signatures.txt") - args "--suppressannotation", '**.SuppressForbidden' + suppressAnnotations = ['**.SuppressForbidden'] if (sourceSet.name == 'test') { - args "-f", buildResources.copy("forbidden/es-test-signatures.txt") - args "-f", buildResources.copy("forbidden/http-signatures.txt") + signaturesFiles += project.files( + buildResources.copy("forbidden/es-test-signatures.txt"), + buildResources.copy("forbidden/http-signatures.txt") + ) } else { - args "-f", buildResources.copy("forbidden/es-server-signatures.txt") + signaturesFiles += project.files(buildResources.copy("forbidden/es-server-signatures.txt")) } dependsOn sourceSet.classesTaskName - doFirst { - // 
Forbidden APIs expects only existing dirs, and requires at least one - FileCollection existingOutputs = sourceSet.output.classesDirs - .filter { it.exists() } - if (existingOutputs.isEmpty()) { - throw new StopExecutionException("${sourceSet.name} has no outputs") - } - existingOutputs.forEach { args "-d", it } + classesDirs = sourceSet.output.classesDirs + ext.replaceSignatureFiles = { String... names -> + signaturesFiles = project.files( + names.collect { buildResources.copy("forbidden/${it}.txt") } + ) + } + ext.addSignatureFiles = { String... names -> + signaturesFiles += project.files( + names.collect { buildResources.copy("forbidden/${it}.txt") } + ) } } ) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy deleted file mode 100644 index d6babbbfbb8b2..0000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.gradle.precommit; - -import org.apache.tools.ant.BuildEvent; -import org.apache.tools.ant.BuildException; -import org.apache.tools.ant.BuildListener; -import org.apache.tools.ant.BuildLogger; -import org.apache.tools.ant.DefaultLogger; -import org.apache.tools.ant.Project; -import org.elasticsearch.gradle.AntTask; -import org.gradle.api.artifacts.Configuration; -import org.gradle.api.file.FileCollection; -import org.gradle.api.tasks.Input -import org.gradle.api.tasks.InputFiles -import org.gradle.api.tasks.OutputFile - -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.attribute.BasicFileAttributes; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * Basic static checking to keep tabs on third party JARs - */ -public class ThirdPartyAuditTask extends AntTask { - - // patterns for classes to exclude, because we understand their issues - private List excludes = []; - - /** - * Input for the task. Set javadoc for {#link getJars} for more. Protected - * so the afterEvaluate closure in the constructor can write it. - */ - protected FileCollection jars; - - /** - * Classpath against which to run the third patty audit. Protected so the - * afterEvaluate closure in the constructor can write it. - */ - protected FileCollection classpath; - - /** - * We use a simple "marker" file that we touch when the task succeeds - * as the task output. This is compared against the modified time of the - * inputs (ie the jars/class files). 
- */ - @OutputFile - File successMarker = new File(project.buildDir, 'markers/thirdPartyAudit') - - ThirdPartyAuditTask() { - // we depend on this because its the only reliable configuration - // this probably makes the build slower: gradle you suck here when it comes to configurations, you pay the price. - dependsOn(project.configurations.testCompile); - description = "Checks third party JAR bytecode for missing classes, use of internal APIs, and other horrors'"; - - project.afterEvaluate { - Configuration configuration = project.configurations.findByName('runtime') - Configuration compileOnly = project.configurations.findByName('compileOnly') - if (configuration == null) { - // some projects apparently do not have 'runtime'? what a nice inconsistency, - // basically only serves to waste time in build logic! - configuration = project.configurations.findByName('testCompile') - } - assert configuration != null - if (compileOnly == null) { - classpath = configuration - } else { - classpath = project.files(configuration, compileOnly) - } - - // we only want third party dependencies. - jars = configuration.fileCollection({ dependency -> - dependency.group.startsWith("org.elasticsearch") == false - }); - - // we don't want provided dependencies, which we have already scanned. e.g. don't - // scan ES core's dependencies for every single plugin - if (compileOnly != null) { - jars -= compileOnly - } - inputs.files(jars) - onlyIf { jars.isEmpty() == false } - } - } - - /** - * classes that should be excluded from the scan, - * e.g. because we know what sheisty stuff those particular classes are up to. - */ - public void setExcludes(String[] classes) { - for (String s : classes) { - if (s.indexOf('*') != -1) { - throw new IllegalArgumentException("illegal third party audit exclusion: '" + s + "', wildcards are not permitted!"); - } - } - excludes = classes.sort(); - } - - /** - * Returns current list of exclusions. - */ - @Input - public List getExcludes() { - return excludes; - } - - // yes, we parse Uwe Schindler's errors to find missing classes, and to keep a continuous audit. Just don't let him know! - static final Pattern MISSING_CLASS_PATTERN = - Pattern.compile(/WARNING: The referenced class '(.*)' cannot be loaded\. Please fix the classpath\!/); - - static final Pattern VIOLATION_PATTERN = - Pattern.compile(/\s\sin ([a-zA-Z0-9\$\.]+) \(.*\)/); - - // we log everything and capture errors and handle them with our whitelist - // this is important, as we detect stale whitelist entries, workaround forbidden apis bugs, - // and it also allows whitelisting missing classes! 
- static class EvilLogger extends DefaultLogger { - final Set missingClasses = new TreeSet<>(); - final Map> violations = new TreeMap<>(); - String previousLine = null; - - @Override - public void messageLogged(BuildEvent event) { - if (event.getTask().getClass() == de.thetaphi.forbiddenapis.ant.AntTask.class) { - if (event.getPriority() == Project.MSG_WARN) { - Matcher m = MISSING_CLASS_PATTERN.matcher(event.getMessage()); - if (m.matches()) { - missingClasses.add(m.group(1).replace('.', '/') + ".class"); - } - - // Reset the priority of the event to DEBUG, so it doesn't - // pollute the build output - event.setMessage(event.getMessage(), Project.MSG_DEBUG); - } else if (event.getPriority() == Project.MSG_ERR) { - Matcher m = VIOLATION_PATTERN.matcher(event.getMessage()); - if (m.matches()) { - String violation = previousLine + '\n' + event.getMessage(); - String clazz = m.group(1).replace('.', '/') + ".class"; - List current = violations.get(clazz); - if (current == null) { - current = new ArrayList<>(); - violations.put(clazz, current); - } - current.add(violation); - } - previousLine = event.getMessage(); - } - } - super.messageLogged(event); - } - } - - @Override - protected BuildLogger makeLogger(PrintStream stream, int outputLevel) { - DefaultLogger log = new EvilLogger(); - log.errorPrintStream = stream; - log.outputPrintStream = stream; - log.messageOutputLevel = outputLevel; - return log; - } - - @Override - protected void runAnt(AntBuilder ant) { - ant.project.addTaskDefinition('thirdPartyAudit', de.thetaphi.forbiddenapis.ant.AntTask); - - // print which jars we are going to scan, always - // this is not the time to try to be succinct! Forbidden will print plenty on its own! - Set names = new TreeSet<>(); - for (File jar : jars) { - names.add(jar.getName()); - } - - // TODO: forbidden-apis + zipfileset gives O(n^2) behavior unless we dump to a tmpdir first, - // and then remove our temp dir afterwards. don't complain: try it yourself. - // we don't use gradle temp dir handling, just google it, or try it yourself. 
- - File tmpDir = new File(project.buildDir, 'tmp/thirdPartyAudit'); - - // clean up any previous mess (if we failed), then unzip everything to one directory - ant.delete(dir: tmpDir.getAbsolutePath()); - tmpDir.mkdirs(); - for (File jar : jars) { - ant.unzip(src: jar.getAbsolutePath(), dest: tmpDir.getAbsolutePath()); - } - - // convert exclusion class names to binary file names - List excludedFiles = excludes.collect {it.replace('.', '/') + ".class"} - Set excludedSet = new TreeSet<>(excludedFiles); - - // jarHellReprise - Set sheistySet = getSheistyClasses(tmpDir.toPath()); - - try { - ant.thirdPartyAudit(failOnUnsupportedJava: false, - failOnMissingClasses: false, - classpath: classpath.asPath) { - fileset(dir: tmpDir) - signatures { - string(value: getClass().getResourceAsStream('/forbidden/third-party-audit.txt').getText('UTF-8')) - } - } - } catch (BuildException ignore) {} - - EvilLogger evilLogger = null; - for (BuildListener listener : ant.project.getBuildListeners()) { - if (listener instanceof EvilLogger) { - evilLogger = (EvilLogger) listener; - break; - } - } - assert evilLogger != null; - - // keep our whitelist up to date - Set bogusExclusions = new TreeSet<>(excludedSet); - bogusExclusions.removeAll(sheistySet); - bogusExclusions.removeAll(evilLogger.missingClasses); - bogusExclusions.removeAll(evilLogger.violations.keySet()); - if (!bogusExclusions.isEmpty()) { - throw new IllegalStateException("Invalid exclusions, nothing is wrong with these classes: " + bogusExclusions); - } - - // don't duplicate classes with the JDK - sheistySet.removeAll(excludedSet); - if (!sheistySet.isEmpty()) { - throw new IllegalStateException("JAR HELL WITH JDK! " + sheistySet); - } - - // don't allow a broken classpath - evilLogger.missingClasses.removeAll(excludedSet); - if (!evilLogger.missingClasses.isEmpty()) { - throw new IllegalStateException("CLASSES ARE MISSING! " + evilLogger.missingClasses); - } - - // don't use internal classes - evilLogger.violations.keySet().removeAll(excludedSet); - if (!evilLogger.violations.isEmpty()) { - throw new IllegalStateException("VIOLATIONS WERE FOUND! " + evilLogger.violations); - } - - // clean up our mess (if we succeed) - ant.delete(dir: tmpDir.getAbsolutePath()); - - successMarker.setText("", 'UTF-8') - } - - /** - * check for sheisty classes: if they also exist in the extensions classloader, its jar hell with the jdk! - */ - private Set getSheistyClasses(Path root) { - // system.parent = extensions loader. - // note: for jigsaw, this evilness will need modifications (e.g. use jrt filesystem!). - // but groovy/gradle needs to work at all first! 
- ClassLoader ext = ClassLoader.getSystemClassLoader().getParent(); - assert ext != null; - - Set sheistySet = new TreeSet<>(); - Files.walkFileTree(root, new SimpleFileVisitor() { - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - String entry = root.relativize(file).toString().replace('\\', '/'); - if (entry.endsWith(".class")) { - if (ext.getResource(entry) != null) { - sheistySet.add(entry); - } - } - return FileVisitResult.CONTINUE; - } - }); - return sheistySet; - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index 0dd56b863324f..aaf4e468182a9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -177,6 +177,12 @@ class NodeInfo { javaVersion = 8 } else if (nodeVersion.onOrAfter("6.2.0") && nodeVersion.before("6.3.0")) { javaVersion = 9 + } else if (project.inFipsJvm && nodeVersion.onOrAfter("6.3.0") && nodeVersion.before("6.4.0")) { + /* + * Elasticsearch versions before 6.4.0 cannot be run in a FIPS-140 JVM. If we're running + * bwc tests in a FIPS-140 JVM, ensure that the pre v6.4.0 nodes use a Java 10 JVM instead. + */ + javaVersion = 10 } args.addAll("-E", "node.portsfile=true") diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index d2101c48aabdc..2838849981a1b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -31,6 +31,7 @@ import org.gradle.api.provider.Provider import org.gradle.api.tasks.Copy import org.gradle.api.tasks.Input import org.gradle.api.tasks.TaskState +import org.gradle.plugins.ide.idea.IdeaPlugin import java.nio.charset.StandardCharsets import java.nio.file.Files @@ -243,10 +244,12 @@ public class RestIntegTestTask extends DefaultTask { } } } - project.idea { - module { - if (scopes.TEST != null) { - scopes.TEST.plus.add(project.configurations.restSpec) + if (project.plugins.hasPlugin(IdeaPlugin)) { + project.idea { + module { + if (scopes.TEST != null) { + scopes.TEST.plus.add(project.configurations.restSpec) + } } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index a2484e9c5fce0..a5d3b41339db6 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -53,6 +53,8 @@ public class StandaloneRestTestPlugin implements Plugin { // only setup tests to build project.sourceSets.create('test') + // create a compileOnly configuration as others might expect it + project.configurations.create("compileOnly") project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}") project.eclipse.classpath.sourceSets = [project.sourceSets.test] diff --git a/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java new file mode 100644 index 0000000000000..6d256ba044971 --- /dev/null +++ 
b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch; + +import org.gradle.api.Action; +import org.gradle.api.Project; +import org.gradle.api.file.CopySpec; +import org.gradle.api.file.FileTree; +import org.gradle.api.tasks.WorkResult; +import org.gradle.process.ExecResult; +import org.gradle.process.JavaExecSpec; + +import java.io.File; + +/** + * Facilitate access to Gradle services without a direct dependency on Project. + * + * In a future release Gradle will offer service injection, this adapter plays that role until that time. + * It exposes the service methods that are part of the public API as the classes implementing them are not. + * Today service injection is not available for + * extensions. + * + * Everything exposed here must be thread safe. That is the very reason why project is not passed in directly. + */ +public class GradleServicesAdapter { + + public final Project project; + + public GradleServicesAdapter(Project project) { + this.project = project; + } + + public static GradleServicesAdapter getInstance(Project project) { + return new GradleServicesAdapter(project); + } + + public WorkResult copy(Action action) { + return project.copy(action); + } + + public WorkResult sync(Action action) { + return project.sync(action); + } + + public ExecResult javaexec(Action action) { + return project.javaexec(action); + } + + public FileTree zipTree(File zipPath) { + return project.zipTree(zipPath); + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/package-info.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java similarity index 72% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/package-info.java rename to buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java index b1e4c6c0d4e54..c926e70b3f765 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/package-info.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java @@ -16,9 +16,21 @@ * specific language governing permissions and limitations * under the License. */ +package org.elasticsearch.gradle; -/** - * Request and Response objects for the default distribution's Machine - * Learning APIs. 
- */ -package org.elasticsearch.protocol.xpack.ml; +public enum Distribution { + + INTEG_TEST("integ-test-zip"), + ZIP("zip"), + ZIP_OSS("zip-oss"); + + private final String name; + + Distribution(String name) { + this.name = name; + } + + public String getName() { + return name; + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java index 03c18f54e67ef..4af104093a5cb 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java @@ -35,6 +35,7 @@ import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.Collections; import java.util.HashSet; import java.util.Set; @@ -105,7 +106,7 @@ public void doExport() { if (is == null) { throw new GradleException("Can't export `" + resourcePath + "` from build-tools: not found"); } - Files.copy(is, destination); + Files.copy(is, destination, StandardCopyOption.REPLACE_EXISTING); } catch (IOException e) { throw new GradleException("Can't write resource `" + resourcePath + "` to " + destination, e); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkJarHellCheck.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkJarHellCheck.java new file mode 100644 index 0000000000000..60de1981f9827 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkJarHellCheck.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle; + +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +public class JdkJarHellCheck { + + private Set detected = new HashSet<>(); + + private void scanForJDKJarHell(Path root) throws IOException { + // system.parent = extensions loader. + // note: for jigsaw, this evilness will need modifications (e.g. use jrt filesystem!) 
+ ClassLoader ext = ClassLoader.getSystemClassLoader().getParent(); + assert ext != null; + + Files.walkFileTree(root, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { + String entry = root.relativize(file).toString().replace('\\', '/'); + if (entry.endsWith(".class")) { + if (ext.getResource(entry) != null) { + detected.add( + entry + .replace("/", ".") + .replace(".class","") + ); + } + } + return FileVisitResult.CONTINUE; + } + }); + } + + public Set getDetected() { + return Collections.unmodifiableSet(detected); + } + + public static void main(String[] argv) throws IOException { + JdkJarHellCheck checker = new JdkJarHellCheck(); + for (String location : argv) { + Path path = Paths.get(location); + if (Files.exists(path) == false) { + throw new IllegalArgumentException("Path does not exist: " + path); + } + checker.scanForJDKJarHell(path); + } + if (checker.getDetected().isEmpty()) { + System.exit(0); + } else { + checker.getDetected().forEach(System.out::println); + System.exit(1); + } + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ClusterformationPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ClusterformationPlugin.java new file mode 100644 index 0000000000000..779e7b61ed9ce --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ClusterformationPlugin.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.gradle.clusterformation; + +import groovy.lang.Closure; +import org.elasticsearch.GradleServicesAdapter; +import org.gradle.api.NamedDomainObjectContainer; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.Task; +import org.gradle.api.execution.TaskActionListener; +import org.gradle.api.execution.TaskExecutionListener; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; +import org.gradle.api.plugins.ExtraPropertiesExtension; +import org.gradle.api.tasks.TaskState; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class ClusterformationPlugin implements Plugin { + + public static final String LIST_TASK_NAME = "listElasticSearchClusters"; + public static final String EXTENSION_NAME = "elasticSearchClusters"; + + private final Logger logger = Logging.getLogger(ClusterformationPlugin.class); + + @Override + public void apply(Project project) { + NamedDomainObjectContainer container = project.container( + ElasticsearchNode.class, + (name) -> new ElasticsearchNode(name, GradleServicesAdapter.getInstance(project)) + ); + project.getExtensions().add(EXTENSION_NAME, container); + + Task listTask = project.getTasks().create(LIST_TASK_NAME); + listTask.setGroup("ES cluster formation"); + listTask.setDescription("Lists all ES clusters configured for this project"); + listTask.doLast((Task task) -> + container.forEach((ElasticsearchConfiguration cluster) -> + logger.lifecycle(" * {}: {}", cluster.getName(), cluster.getDistribution()) + ) + ); + + Map> taskToCluster = new HashMap<>(); + + // register an extension for all current and future tasks, so that any task can declare that it wants to use a + // specific cluster. + project.getTasks().all((Task task) -> + task.getExtensions().findByType(ExtraPropertiesExtension.class) + .set( + "useCluster", + new Closure(this, this) { + public void doCall(ElasticsearchConfiguration conf) { + taskToCluster.computeIfAbsent(task, k -> new ArrayList<>()).add(conf); + } + }) + ); + + project.getGradle().getTaskGraph().whenReady(taskExecutionGraph -> + taskExecutionGraph.getAllTasks() + .forEach(task -> + taskToCluster.getOrDefault(task, Collections.emptyList()).forEach(ElasticsearchConfiguration::claim) + ) + ); + project.getGradle().addListener( + new TaskActionListener() { + @Override + public void beforeActions(Task task) { + // we only start the cluster before the actions, so we'll not start it if the task is up-to-date + taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchConfiguration::start); + } + @Override + public void afterActions(Task task) {} + } + ); + project.getGradle().addListener( + new TaskExecutionListener() { + @Override + public void afterExecute(Task task, TaskState state) { + // always un-claim the cluster, even if _this_ task is up-to-date, as others might not have been and caused the + // cluster to start. 
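+ // Claims are reference-counted: unClaimAndStop releases this task's claim and only stops the
+ // cluster once no other task still holds one.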
+ taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchConfiguration::unClaimAndStop); + } + @Override + public void beforeExecute(Task task) {} + } + ); + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchConfiguration.java new file mode 100644 index 0000000000000..913d88e9fa11b --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchConfiguration.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.clusterformation; + +import org.elasticsearch.gradle.Distribution; +import org.elasticsearch.gradle.Version; + +import java.util.concurrent.Future; + +public interface ElasticsearchConfiguration { + String getName(); + + Version getVersion(); + + void setVersion(Version version); + + default void setVersion(String version) { + setVersion(Version.fromString(version)); + } + + Distribution getDistribution(); + + void setDistribution(Distribution distribution); + + void claim(); + + Future start(); + + void unClaimAndStop(); +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchNode.java new file mode 100644 index 0000000000000..8b78fc2b627cb --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchNode.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.gradle.clusterformation; + +import org.elasticsearch.GradleServicesAdapter; +import org.elasticsearch.gradle.Distribution; +import org.elasticsearch.gradle.Version; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; + +import java.util.Objects; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +public class ElasticsearchNode implements ElasticsearchConfiguration { + + private final String name; + private final GradleServicesAdapter services; + private final AtomicInteger noOfClaims = new AtomicInteger(); + private final AtomicBoolean started = new AtomicBoolean(false); + private final Logger logger = Logging.getLogger(ElasticsearchNode.class); + + private Distribution distribution; + private Version version; + + public ElasticsearchNode(String name, GradleServicesAdapter services) { + this.name = name; + this.services = services; + } + + @Override + public String getName() { + return name; + } + + @Override + public Version getVersion() { + return version; + } + + @Override + public void setVersion(Version version) { + checkNotRunning(); + this.version = version; + } + + @Override + public Distribution getDistribution() { + return distribution; + } + + @Override + public void setDistribution(Distribution distribution) { + checkNotRunning(); + this.distribution = distribution; + } + + @Override + public void claim() { + noOfClaims.incrementAndGet(); + } + + /** + * Start the cluster if not running. Does nothing if the cluster is already running. + * + * @return future of thread running in the background + */ + @Override + public Future start() { + if (started.getAndSet(true)) { + logger.lifecycle("Already started cluster: {}", name); + } else { + logger.lifecycle("Starting cluster: {}", name); + } + return null; + } + + /** + * Stops a running cluster if it's not claimed. Does nothing otherwise. + */ + @Override + public void unClaimAndStop() { + int decrementedClaims = noOfClaims.decrementAndGet(); + if (decrementedClaims > 0) { + logger.lifecycle("Not stopping {}, since cluster still has {} claim(s)", name, decrementedClaims); + return; + } + if (started.get() == false) { + logger.lifecycle("Asked to unClaimAndStop, but cluster was not running: {}", name); + return; + } + logger.lifecycle("Stopping {}, number of claims is {}", name, decrementedClaims); + } + + private void checkNotRunning() { + if (started.get()) { + throw new IllegalStateException("Configuration can not be altered while running "); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ElasticsearchNode that = (ElasticsearchNode) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java new file mode 100644 index 0000000000000..aaa9564b0dc0b --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java @@ -0,0 +1,190 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.precommit; + +import org.gradle.api.DefaultTask; +import org.gradle.api.JavaVersion; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.file.FileCollection; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.SkipWhenEmpty; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.TaskAction; +import org.gradle.process.JavaExecSpec; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; + +public class ForbiddenApisCliTask extends DefaultTask { + + private final Logger logger = Logging.getLogger(ForbiddenApisCliTask.class); + private FileCollection signaturesFiles; + private List signatures = new ArrayList<>(); + private Set bundledSignatures = new LinkedHashSet<>(); + private Set suppressAnnotations = new LinkedHashSet<>(); + private JavaVersion targetCompatibility; + private FileCollection classesDirs; + private SourceSet sourceSet; + // This needs to be an object so it can hold Groovy GStrings + private Object javaHome; + + @Input + public JavaVersion getTargetCompatibility() { + return targetCompatibility; + } + + public void setTargetCompatibility(JavaVersion targetCompatibility) { + if (targetCompatibility.compareTo(JavaVersion.VERSION_1_10) > 0) { + logger.warn( + "Target compatibility is set to {} but forbiddenapis only supports up to 10. 
Will cap at 10.", + targetCompatibility + ); + this.targetCompatibility = JavaVersion.VERSION_1_10; + } else { + this.targetCompatibility = targetCompatibility; + } + } + + @OutputFile + public File getMarkerFile() { + return new File( + new File(getProject().getBuildDir(), "precommit"), + getName() + ); + } + + @InputFiles + @SkipWhenEmpty + public FileCollection getClassesDirs() { + return classesDirs.filter(File::exists); + } + + public void setClassesDirs(FileCollection classesDirs) { + this.classesDirs = classesDirs; + } + + @InputFiles + public FileCollection getSignaturesFiles() { + return signaturesFiles; + } + + public void setSignaturesFiles(FileCollection signaturesFiles) { + this.signaturesFiles = signaturesFiles; + } + + @Input + public List getSignatures() { + return signatures; + } + + public void setSignatures(List signatures) { + this.signatures = signatures; + } + + @Input + public Set getBundledSignatures() { + return bundledSignatures; + } + + public void setBundledSignatures(Set bundledSignatures) { + this.bundledSignatures = bundledSignatures; + } + + @Input + public Set getSuppressAnnotations() { + return suppressAnnotations; + } + + public void setSuppressAnnotations(Set suppressAnnotations) { + this.suppressAnnotations = suppressAnnotations; + } + + @InputFiles + public FileCollection getClassPathFromSourceSet() { + return getProject().files( + sourceSet.getCompileClasspath(), + sourceSet.getRuntimeClasspath() + ); + } + + public void setSourceSet(SourceSet sourceSet) { + this.sourceSet = sourceSet; + } + + @InputFiles + public Configuration getForbiddenAPIsConfiguration() { + return getProject().getConfigurations().getByName("forbiddenApisCliJar"); + } + + @Input + public Object getJavaHome() { + return javaHome; + } + + public void setJavaHome(Object javaHome) { + this.javaHome = javaHome; + } + + @TaskAction + public void runForbiddenApisAndWriteMarker() throws IOException { + getProject().javaexec((JavaExecSpec spec) -> { + spec.classpath( + getForbiddenAPIsConfiguration(), + getClassPathFromSourceSet() + ); + spec.setExecutable(getJavaHome() + "/bin/java"); + spec.setMain("de.thetaphi.forbiddenapis.cli.CliMain"); + // build the command line + getSignaturesFiles().forEach(file -> spec.args("-f", file.getAbsolutePath())); + getSuppressAnnotations().forEach(annotation -> spec.args("--suppressannotation", annotation)); + getBundledSignatures().forEach(bundled -> { + // there's no option for target compatibility so we have to interpret it + final String prefix; + if (bundled.equals("jdk-system-out") || + bundled.equals("jdk-reflection") || + bundled.equals("jdk-non-portable")) { + prefix = ""; + } else { + prefix = "-" + ( + getTargetCompatibility().compareTo(JavaVersion.VERSION_1_9) >= 0 ? + getTargetCompatibility().getMajorVersion() : + "1." + getTargetCompatibility().getMajorVersion()) + ; + } + spec.args("-b", bundled + prefix); + } + ); + getClassesDirs().forEach(dir -> + spec.args("-d", dir) + ); + }); + Files.write(getMarkerFile().toPath(), Collections.emptyList()); + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java new file mode 100644 index 0000000000000..7e4766ada6541 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java @@ -0,0 +1,309 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.precommit; + +import org.apache.commons.io.output.NullOutputStream; +import org.elasticsearch.gradle.JdkJarHellCheck; +import org.elasticsearch.test.NamingConventionsCheck; +import org.gradle.api.DefaultTask; +import org.gradle.api.GradleException; +import org.gradle.api.JavaVersion; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.file.FileCollection; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.InputFile; +import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.OutputDirectory; +import org.gradle.api.tasks.StopExecutionException; +import org.gradle.api.tasks.TaskAction; +import org.gradle.process.ExecResult; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; +import java.util.Set; +import java.util.TreeSet; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +public class ThirdPartyAuditTask extends DefaultTask { + + private static final Pattern MISSING_CLASS_PATTERN = Pattern.compile( + "WARNING: The referenced class '(.*)' cannot be loaded\\. Please fix the classpath!" 
+ ); + + private static final Pattern VIOLATION_PATTERN = Pattern.compile( + "\\s\\sin ([a-zA-Z0-9$.]+) \\(.*\\)" + ); + + /** + * patterns for classes to exclude, because we understand their issues + */ + private Set excludes = new TreeSet<>(); + + private File signatureFile; + + private String javaHome; + + private JavaVersion targetCompatibility; + + @Input + public JavaVersion getTargetCompatibility() { + return targetCompatibility; + } + + public void setTargetCompatibility(JavaVersion targetCompatibility) { + this.targetCompatibility = targetCompatibility; + } + + @InputFiles + public Configuration getForbiddenAPIsConfiguration() { + return getProject().getConfigurations().getByName("forbiddenApisCliJar"); + } + + @InputFile + public File getSignatureFile() { + return signatureFile; + } + + public void setSignatureFile(File signatureFile) { + this.signatureFile = signatureFile; + } + + @InputFiles + public Configuration getRuntimeConfiguration() { + Configuration runtime = getProject().getConfigurations().findByName("runtime"); + if (runtime == null) { + return getProject().getConfigurations().getByName("testCompile"); + } + return runtime; + } + + @Input + public String getJavaHome() { + return javaHome; + } + + public void setJavaHome(String javaHome) { + this.javaHome = javaHome; + } + + @InputFiles + public Configuration getCompileOnlyConfiguration() { + return getProject().getConfigurations().getByName("compileOnly"); + } + + @OutputDirectory + public File getJarExpandDir() { + return new File( + new File(getProject().getBuildDir(), "precommit/thirdPartyAudit"), + getName() + ); + } + + public void setExcludes(String... classes) { + excludes.clear(); + for (String each : classes) { + if (each.indexOf('*') != -1) { + throw new IllegalArgumentException("illegal third party audit exclusion: '" + each + "', wildcards are not permitted!"); + } + excludes.add(each); + } + } + + @Input + public Set getExcludes() { + return Collections.unmodifiableSet(excludes); + } + + @TaskAction + public void runThirdPartyAudit() throws IOException { + FileCollection jars = getJarsToScan(); + + extractJars(jars); + + final String forbiddenApisOutput = runForbiddenAPIsCli(); + + final Set missingClasses = new TreeSet<>(); + Matcher missingMatcher = MISSING_CLASS_PATTERN.matcher(forbiddenApisOutput); + while (missingMatcher.find()) { + missingClasses.add(missingMatcher.group(1)); + } + + final Set violationsClasses = new TreeSet<>(); + Matcher violationMatcher = VIOLATION_PATTERN.matcher(forbiddenApisOutput); + while (violationMatcher.find()) { + violationsClasses.add(violationMatcher.group(1)); + } + + Set jdkJarHellClasses = runJdkJarHellCheck(); + + assertNoPointlessExclusions(missingClasses, violationsClasses, jdkJarHellClasses); + + assertNoMissingAndViolations(missingClasses, violationsClasses); + + assertNoJarHell(jdkJarHellClasses); + } + + private void extractJars(FileCollection jars) { + File jarExpandDir = getJarExpandDir(); + // We need to clean up to make sure old dependencies don't linger + getProject().delete(jarExpandDir); + jars.forEach(jar -> + getProject().copy(spec -> { + spec.from(getProject().zipTree(jar)); + spec.into(jarExpandDir); + // Exclude classes for multi release jars above target + for (int i = Integer.parseInt(targetCompatibility.getMajorVersion()) + 1; + i <= Integer.parseInt(JavaVersion.VERSION_HIGHER.getMajorVersion()); + i++ + ) { + spec.exclude("META-INF/versions/" + i + "/**"); + } + }) + ); + } + + private void assertNoJarHell(Set jdkJarHellClasses) { + 
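+ // Strip the explicitly excluded classes first; anything left over clashes with a class
+ // already provided by the JDK and fails the audit.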
jdkJarHellClasses.removeAll(excludes); + if (jdkJarHellClasses.isEmpty() == false) { + throw new IllegalStateException("Jar Hell with the JDK:" + formatClassList(jdkJarHellClasses)); + } + } + + private void assertNoMissingAndViolations(Set missingClasses, Set violationsClasses) { + missingClasses.removeAll(excludes); + violationsClasses.removeAll(excludes); + String missingText = formatClassList(missingClasses); + String violationsText = formatClassList(violationsClasses); + if (missingText.isEmpty() && violationsText.isEmpty()) { + getLogger().info("Third party audit passed successfully"); + } else { + throw new IllegalStateException( + "Audit of third party dependencies failed:\n" + + (missingText.isEmpty() ? "" : "Missing classes:\n" + missingText) + + (violationsText.isEmpty() ? "" : "Classes with violations:\n" + violationsText) + ); + } + } + + private void assertNoPointlessExclusions(Set missingClasses, Set violationsClasses, Set jdkJarHellClasses) { + // keep our whitelist up to date + Set bogusExclusions = new TreeSet<>(excludes); + bogusExclusions.removeAll(missingClasses); + bogusExclusions.removeAll(jdkJarHellClasses); + bogusExclusions.removeAll(violationsClasses); + if (bogusExclusions.isEmpty() == false) { + throw new IllegalStateException( + "Invalid exclusions, nothing is wrong with these classes: " + formatClassList(bogusExclusions) + ); + } + } + + private String runForbiddenAPIsCli() throws IOException { + ByteArrayOutputStream errorOut = new ByteArrayOutputStream(); + getProject().javaexec(spec -> { + spec.setExecutable(javaHome + "/bin/java"); + spec.classpath( + getForbiddenAPIsConfiguration(), + getRuntimeConfiguration(), + getCompileOnlyConfiguration() + ); + spec.setMain("de.thetaphi.forbiddenapis.cli.CliMain"); + spec.args( + "-f", getSignatureFile().getAbsolutePath(), + "-d", getJarExpandDir(), + "--allowmissingclasses" + ); + spec.setErrorOutput(errorOut); + if (getLogger().isInfoEnabled() == false) { + spec.setStandardOutput(new NullOutputStream()); + } + spec.setIgnoreExitValue(true); + }); + final String forbiddenApisOutput; + try (ByteArrayOutputStream outputStream = errorOut) { + forbiddenApisOutput = outputStream.toString(StandardCharsets.UTF_8.name()); + } + if (getLogger().isInfoEnabled()) { + getLogger().info(forbiddenApisOutput); + } + return forbiddenApisOutput; + } + + private FileCollection getJarsToScan() { + FileCollection jars = getRuntimeConfiguration() + .fileCollection(dep -> dep.getGroup().startsWith("org.elasticsearch") == false); + Configuration compileOnlyConfiguration = getCompileOnlyConfiguration(); + // don't scan provided dependencies that we already scanned, e.x. 
don't scan cores dependencies for every plugin + if (compileOnlyConfiguration != null) { + jars.minus(compileOnlyConfiguration); + } + if (jars.isEmpty()) { + throw new StopExecutionException("No jars to scan"); + } + return jars; + } + + private String formatClassList(Set classList) { + return classList.stream() + .map(name -> " * " + name) + .collect(Collectors.joining("\n")); + } + + private Set runJdkJarHellCheck() throws IOException { + ByteArrayOutputStream standardOut = new ByteArrayOutputStream(); + ExecResult execResult = getProject().javaexec(spec -> { + URL location = NamingConventionsCheck.class.getProtectionDomain().getCodeSource().getLocation(); + if (location.getProtocol().equals("file") == false) { + throw new GradleException("Unexpected location for NamingConventionCheck class: " + location); + } + try { + spec.classpath( + location.toURI().getPath(), + getRuntimeConfiguration(), + getCompileOnlyConfiguration() + ); + } catch (URISyntaxException e) { + throw new AssertionError(e); + } + spec.setMain(JdkJarHellCheck.class.getName()); + spec.args(getJarExpandDir()); + spec.setIgnoreExitValue(true); + spec.setExecutable(javaHome + "/bin/java"); + spec.setStandardOutput(standardOut); + }); + if (execResult.getExitValue() == 0) { + return Collections.emptySet(); + } + final String jdkJarHellCheckList; + try (ByteArrayOutputStream outputStream = standardOut) { + jdkJarHellCheckList = outputStream.toString(StandardCharsets.UTF_8.name()); + } + return new TreeSet<>(Arrays.asList(jdkJarHellCheckList.split("\\r?\\n"))); + } + + +} diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.clusterformation.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.clusterformation.properties new file mode 100644 index 0000000000000..dfd6cd9956a58 --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.clusterformation.properties @@ -0,0 +1 @@ +implementation-class=org.elasticsearch.gradle.clusterformation.ClusterformationPlugin diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion index 899dd4f5927a1..9add3349f9ea7 100644 --- a/buildSrc/src/main/resources/minimumGradleVersion +++ b/buildSrc/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -4.9 \ No newline at end of file +4.10 \ No newline at end of file diff --git a/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy b/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy index ad36c84078398..f6b9cb5fc95bf 100644 --- a/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy +++ b/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy @@ -26,7 +26,7 @@ class VersionCollectionTests extends GradleUnitTestCase { assertEquals(vc.nextMinorSnapshot, Version.fromString("6.3.0-SNAPSHOT")) assertEquals(vc.stagedMinorSnapshot, Version.fromString("6.2.0-SNAPSHOT")) assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.1.1-SNAPSHOT")) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) + assertNull(vc.maintenanceBugfixSnapshot) vc.indexCompatible.containsAll(vc.versions) @@ -65,7 +65,7 @@ class VersionCollectionTests extends GradleUnitTestCase { assertEquals(vc.nextMinorSnapshot, Version.fromString("6.3.0-SNAPSHOT")) assertEquals(vc.stagedMinorSnapshot, null) assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.2.1-SNAPSHOT")) - assertEquals(vc.maintenanceBugfixSnapshot, 
Version.fromString("5.2.1-SNAPSHOT")) + assertNull(vc.maintenanceBugfixSnapshot) vc.indexCompatible.containsAll(vc.versions) diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java new file mode 100644 index 0000000000000..aca9906701150 --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.commons.io.FileUtils; +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.testkit.runner.GradleRunner; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +public class BuildExamplePluginsIT extends GradleIntegrationTestCase { + + private static List EXAMPLE_PLUGINS = Collections.unmodifiableList( + Arrays.stream( + Objects.requireNonNull(System.getProperty("test.build-tools.plugin.examples")) + .split(File.pathSeparator) + ).map(File::new).collect(Collectors.toList()) + ); + + @Rule + public TemporaryFolder tmpDir = new TemporaryFolder(); + + public final File examplePlugin; + + public BuildExamplePluginsIT(File examplePlugin) { + this.examplePlugin = examplePlugin; + } + + @BeforeClass + public static void assertProjectsExist() { + assertEquals( + EXAMPLE_PLUGINS, + EXAMPLE_PLUGINS.stream().filter(File::exists).collect(Collectors.toList()) + ); + } + + @ParametersFactory + public static Iterable parameters() { + return EXAMPLE_PLUGINS + .stream() + .map(each -> new Object[] {each}) + .collect(Collectors.toList()); + } + + public void testCurrentExamplePlugin() throws IOException { + FileUtils.copyDirectory(examplePlugin, tmpDir.getRoot()); + // just get rid of deprecation warnings + Files.write( + getTempPath("settings.gradle"), + "enableFeaturePreview('STABLE_PUBLISHING')\n".getBytes(StandardCharsets.UTF_8) + ); + + adaptBuildScriptForTest(); + + Files.write( + tmpDir.newFile("NOTICE.txt").toPath(), + "dummy test notice".getBytes(StandardCharsets.UTF_8) + ); + + GradleRunner.create() + .withProjectDir(tmpDir.getRoot()) + .withArguments("clean", "check", "-s", "-i", "--warning-mode=all", "--scan") + .withPluginClasspath() + .build(); + } + + private void adaptBuildScriptForTest() throws IOException { + // Add the 
local repo as a build script URL so we can pull in build-tools and apply the plugin under test + // + is ok because we have no other repo and just want to pick up latest + writeBuildScript( + "buildscript {\n" + + " repositories {\n" + + " maven {\n" + + " url = '" + getLocalTestRepoPath() + "'\n" + + " }\n" + + " }\n" + + " dependencies {\n" + + " classpath \"org.elasticsearch.gradle:build-tools:+\"\n" + + " }\n" + + "}\n" + ); + // get the original file + Files.readAllLines(getTempPath("build.gradle"), StandardCharsets.UTF_8) + .stream() + .map(line -> line + "\n") + .forEach(this::writeBuildScript); + // Add a repositories section to be able to resolve dependencies + String luceneSnapshotRepo = ""; + String luceneSnapshotRevision = System.getProperty("test.lucene-snapshot-revision"); + if (luceneSnapshotRepo != null) { + luceneSnapshotRepo = " maven {\n" + + " url \"http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + luceneSnapshotRevision + "\"\n" + + " }\n"; + } + writeBuildScript("\n" + + "repositories {\n" + + " maven {\n" + + " url \"" + getLocalTestRepoPath() + "\"\n" + + " }\n" + + luceneSnapshotRepo + + "}\n" + ); + Files.delete(getTempPath("build.gradle")); + Files.move(getTempPath("build.gradle.new"), getTempPath("build.gradle")); + System.err.print("Generated build script is:"); + Files.readAllLines(getTempPath("build.gradle")).forEach(System.err::println); + } + + private Path getTempPath(String fileName) { + return new File(tmpDir.getRoot(), fileName).toPath(); + } + + private Path writeBuildScript(String script) { + try { + Path path = getTempPath("build.gradle.new"); + return Files.write( + path, + script.getBytes(StandardCharsets.UTF_8), + Files.exists(path) ? StandardOpenOption.APPEND : StandardOpenOption.CREATE_NEW + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java index 98fea2ea15ab6..99afd0bcbe0ae 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java @@ -40,7 +40,7 @@ public void testUpToDateWithSourcesConfigured() { .withArguments("buildResources", "-s", "-i") .withPluginClasspath() .build(); - assertTaskSuccessfull(result, ":buildResources"); + assertTaskSuccessful(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); @@ -61,8 +61,8 @@ public void testImplicitTaskDependencyCopy() { .withPluginClasspath() .build(); - assertTaskSuccessfull(result, ":buildResources"); - assertTaskSuccessfull(result, ":sampleCopyAll"); + assertTaskSuccessful(result, ":buildResources"); + assertTaskSuccessful(result, ":sampleCopyAll"); assertBuildFileExists(result, PROJECT_NAME, "sampleCopyAll/checkstyle.xml"); // This is a side effect of compile time reference assertBuildFileExists(result, PROJECT_NAME, "sampleCopyAll/checkstyle_suppressions.xml"); @@ -75,7 +75,7 @@ public void testImplicitTaskDependencyInputFileOfOther() { .withPluginClasspath() .build(); - assertTaskSuccessfull(result, ":sample"); + assertTaskSuccessful(result, ":sample"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); 
assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/clusterformation/ClusterformationPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/clusterformation/ClusterformationPluginIT.java new file mode 100644 index 0000000000000..c690557537dfb --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/clusterformation/ClusterformationPluginIT.java @@ -0,0 +1,144 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.clusterformation; + +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.testkit.runner.BuildResult; +import org.gradle.testkit.runner.GradleRunner; +import org.gradle.testkit.runner.TaskOutcome; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +public class ClusterformationPluginIT extends GradleIntegrationTestCase { + + public void testListClusters() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("listElasticSearchClusters", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SUCCESS, result.task(":listElasticSearchClusters").getOutcome()); + assertOutputContains( + result.getOutput(), + " * myTestCluster:" + ); + + } + + public void testUseClusterByOne() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("user1", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); + assertOutputContains( + result.getOutput(), + "Starting cluster: myTestCluster", + "Stopping myTestCluster, number of claims is 0" + ); + } + + public void testUseClusterByOneWithDryRun() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("user1", "-s", "--dry-run") + .withPluginClasspath() + .build(); + + assertNull(result.task(":user1")); + assertOutputDoesNotContain( + result.getOutput(), + "Starting cluster: myTestCluster", + "Stopping myTestCluster, number of claims is 0" + ); + } + + public void testUseClusterByTwo() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("user1", "user2", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); + assertEquals(TaskOutcome.SUCCESS, result.task(":user2").getOutcome()); + assertOutputContains( + result.getOutput(), + "Starting cluster: myTestCluster", + "Not stopping myTestCluster, since cluster still has 1 claim(s)", + "Stopping myTestCluster, number of claims is 0" + ); + } + + public void 
testUseClusterByUpToDateTask() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("upToDate1", "upToDate2", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate1").getOutcome()); + assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate2").getOutcome()); + assertOutputContains( + result.getOutput(), + "Not stopping myTestCluster, since cluster still has 1 claim(s)", + "cluster was not running: myTestCluster" + ); + assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster"); + } + + public void testUseClusterBySkippedTask() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("skipped1", "skipped2", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome()); + assertEquals(TaskOutcome.SKIPPED, result.task(":skipped2").getOutcome()); + assertOutputContains( + result.getOutput(), + "Not stopping myTestCluster, since cluster still has 1 claim(s)", + "cluster was not running: myTestCluster" + ); + assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster"); + } + + public void tetUseClusterBySkippedAndWorkingTask() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("skipped1", "user1", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome()); + assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); + assertOutputContains( + result.getOutput(), + "> Task :user1", + "Starting cluster: myTestCluster", + "Stopping myTestCluster, number of claims is 0" + ); + } + +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java new file mode 100644 index 0000000000000..03f2022bc66e8 --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java @@ -0,0 +1,42 @@ +package org.elasticsearch.gradle.precommit; + +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.testkit.runner.BuildResult; +import org.gradle.testkit.runner.GradleRunner; + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +public class JarHellTaskIT extends GradleIntegrationTestCase { + + public void testJarHellDetected() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("jarHell")) + .withArguments("clean", "precommit", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath()) + .withPluginClasspath() + .buildAndFail(); + + assertTaskFailed(result, ":jarHell"); + assertOutputContains( + result.getOutput(), + "Exception in thread \"main\" java.lang.IllegalStateException: jar hell!", + "class: org.apache.logging.log4j.Logger" + ); + } + +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index f00ab406a6c10..a1d4b86ab760c 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -9,6 +9,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.List; +import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -66,15 +67,24 @@ protected void assertOutputDoesNotContain(String output, String... lines) { } } - protected void assertTaskSuccessfull(BuildResult result, String taskName) { + protected void assertTaskFailed(BuildResult result, String taskName) { + assertTaskOutcome(result, taskName, TaskOutcome.FAILED); + } + + protected void assertTaskSuccessful(BuildResult result, String taskName) { + assertTaskOutcome(result, taskName, TaskOutcome.SUCCESS); + } + + private void assertTaskOutcome(BuildResult result, String taskName, TaskOutcome taskOutcome) { BuildTask task = result.task(taskName); if (task == null) { - fail("Expected task `" + taskName + "` to be successful, but it did not run"); + fail("Expected task `" + taskName + "` to be " + taskOutcome +", but it did not run" + + "\n\nOutput is:\n" + result.getOutput()); } assertEquals( "Expected task to be successful but it was: " + task.getOutcome() + - "\n\nOutput is:\n" + result.getOutput() , - TaskOutcome.SUCCESS, + taskOutcome + "\n\nOutput is:\n" + result.getOutput() , + taskOutcome, task.getOutcome() ); } @@ -109,4 +119,17 @@ protected void assertBuildFileDoesNotExists(BuildResult result, String projectNa Files.exists(absPath) ); } + + protected String getLocalTestRepoPath() { + String property = System.getProperty("test.local-test-repo-path"); + Objects.requireNonNull(property, "test.local-test-repo-path not passed to tests"); + File file = new File(property); + assertTrue("Expected " + property + " to exist, but it did not!", file.exists()); + if (File.separator.equals("\\")) { + // Use / on Windows too, the build script is not happy with \ + return file.getAbsolutePath().replace(File.separator, "/"); + } else { + return file.getAbsolutePath(); + } + } } diff --git a/buildSrc/src/testKit/clusterformation/build.gradle b/buildSrc/src/testKit/clusterformation/build.gradle new file mode 100644 index 0000000000000..ae9dd8a2c335c --- /dev/null +++ b/buildSrc/src/testKit/clusterformation/build.gradle @@ -0,0 +1,41 @@ +plugins { + id 'elasticsearch.clusterformation' +} + +elasticSearchClusters { + myTestCluster { + distribution = 'ZIP' + } +} + +task user1 { + useCluster elasticSearchClusters.myTestCluster + doLast { + println "user1 executing" + } +} + +task user2 { + useCluster elasticSearchClusters.myTestCluster + doLast { + println "user2 executing" + } +} + +task upToDate1 { + useCluster 
elasticSearchClusters.myTestCluster +} + +task upToDate2 { + useCluster elasticSearchClusters.myTestCluster +} + +task skipped1 { + enabled = false + useCluster elasticSearchClusters.myTestCluster +} + +task skipped2 { + enabled = false + useCluster elasticSearchClusters.myTestCluster +} diff --git a/buildSrc/src/testKit/jarHell/build.gradle b/buildSrc/src/testKit/jarHell/build.gradle new file mode 100644 index 0000000000000..17ff43fc7403b --- /dev/null +++ b/buildSrc/src/testKit/jarHell/build.gradle @@ -0,0 +1,29 @@ +plugins { + id 'java' + id 'elasticsearch.build' +} + +dependencyLicenses.enabled = false +dependenciesInfo.enabled = false +forbiddenApisMain.enabled = false +forbiddenApisTest.enabled = false +thirdPartyAudit.enabled = false +namingConventions.enabled = false +ext.licenseFile = file("$buildDir/dummy/license") +ext.noticeFile = file("$buildDir/dummy/notice") + +repositories { + mavenCentral() + repositories { + maven { + url System.getProperty("local.repo.path") + } + } +} + +dependencies { + // Needed for the JarHell task + testCompile ("org.elasticsearch.test:framework:${versions.elasticsearch}") + // causes jar hell with local sources + compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" +} diff --git a/buildSrc/src/testKit/jarHell/src/main/java/org/apache/logging/log4j/Logger.java b/buildSrc/src/testKit/jarHell/src/main/java/org/apache/logging/log4j/Logger.java new file mode 100644 index 0000000000000..a4332c664fa38 --- /dev/null +++ b/buildSrc/src/testKit/jarHell/src/main/java/org/apache/logging/log4j/Logger.java @@ -0,0 +1,7 @@ +package org.apache.logging.log4j; + +// Jar Hell ! +public class Logger { + +} + diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 6f5eab6e1db1e..9acfc630f94f5 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -16,8 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks import org.elasticsearch.gradle.test.RestIntegTestTask import org.gradle.api.internal.provider.Providers @@ -47,13 +45,13 @@ dependencies { * Everything in the "shadow" configuration is *not* copied into the * shadowJar. */ - shadow "org.elasticsearch:elasticsearch:${version}" - shadow "org.elasticsearch.client:elasticsearch-rest-client:${version}" - shadow "org.elasticsearch.plugin:parent-join-client:${version}" - shadow "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}" - shadow "org.elasticsearch.plugin:rank-eval-client:${version}" - shadow "org.elasticsearch.plugin:lang-mustache-client:${version}" - compile project(':x-pack:protocol') + compile "org.elasticsearch:elasticsearch:${version}" + compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" + compile "org.elasticsearch.plugin:parent-join-client:${version}" + compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}" + compile "org.elasticsearch.plugin:rank-eval-client:${version}" + compile "org.elasticsearch.plugin:lang-mustache-client:${version}" + bundle project(':x-pack:protocol') testCompile "org.elasticsearch.client:test:${version}" testCompile "org.elasticsearch.test:framework:${version}" @@ -75,8 +73,8 @@ dependencyLicenses { forbiddenApisMain { // core does not depend on the httpclient for compile so we add the signatures here. 
We don't add them for test as they are already // specified - signaturesURLs += [PrecommitTasks.getResource('/forbidden/http-signatures.txt')] - signaturesURLs += [file('src/main/resources/forbidden/rest-high-level-signatures.txt').toURI().toURL()] + addSignatureFiles 'http-signatures' + signaturesFiles += files('src/main/resources/forbidden/rest-high-level-signatures.txt') } integTestCluster { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java new file mode 100644 index 0000000000000..293105f5abeb8 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; + +import java.io.IOException; + +import static java.util.Collections.emptySet; + + +public class GraphClient { + private final RestHighLevelClient restHighLevelClient; + + GraphClient(RestHighLevelClient restHighLevelClient) { + this.restHighLevelClient = restHighLevelClient; + } + + /** + * Executes an exploration request using the Graph API. + * + * See Graph API + * on elastic.co. + */ + public final GraphExploreResponse explore(GraphExploreRequest graphExploreRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(graphExploreRequest, RequestConverters::xPackGraphExplore, + options, GraphExploreResponse::fromXContext, emptySet()); + } + + /** + * Asynchronously executes an exploration request using the Graph API. + * + * See Graph API + * on elastic.co. + */ + public final void exploreAsync(GraphExploreRequest graphExploreRequest, + RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(graphExploreRequest, RequestConverters::xPackGraphExplore, + options, GraphExploreResponse::fromXContext, listener, emptySet()); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java new file mode 100644 index 0000000000000..8a04c229de261 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -0,0 +1,174 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.RequestConverters.EndpointBuilder; +import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.DeleteJobRequest; +import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetJobRequest; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetRecordsRequest; +import org.elasticsearch.client.ml.OpenJobRequest; +import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.client.ml.FlushJobRequest; + +import java.io.IOException; + +import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE; +import static org.elasticsearch.client.RequestConverters.createEntity; + +final class MLRequestConverters { + + private MLRequestConverters() {} + + static Request putJob(PutJobRequest putJobRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(putJobRequest.getJob().getId()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + request.setEntity(createEntity(putJobRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request getJob(GetJobRequest getJobRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(Strings.collectionToCommaDelimitedString(getJobRequest.getJobIds())) + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + RequestConverters.Params params = new RequestConverters.Params(request); + if (getJobRequest.isAllowNoJobs() != null) { + params.putParam("allow_no_jobs", Boolean.toString(getJobRequest.isAllowNoJobs())); + } + + return request; + } + + static Request openJob(OpenJobRequest openJobRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(openJobRequest.getJobId()) + .addPathPartAsIs("_open") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(openJobRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request closeJob(CloseJobRequest closeJobRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(Strings.collectionToCommaDelimitedString(closeJobRequest.getJobIds())) + .addPathPartAsIs("_close") + .build(); + Request request = new 
Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(closeJobRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteJob(DeleteJobRequest deleteJobRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(deleteJobRequest.getJobId()) + .build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + + RequestConverters.Params params = new RequestConverters.Params(request); + params.putParam("force", Boolean.toString(deleteJobRequest.isForce())); + + return request; + } + + static Request getBuckets(GetBucketsRequest getBucketsRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(getBucketsRequest.getJobId()) + .addPathPartAsIs("results") + .addPathPartAsIs("buckets") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(getBucketsRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request flushJob(FlushJobRequest flushJobRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(flushJobRequest.getJobId()) + .addPathPartAsIs("_flush") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(flushJobRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request getJobStats(GetJobStatsRequest getJobStatsRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(Strings.collectionToCommaDelimitedString(getJobStatsRequest.getJobIds())) + .addPathPartAsIs("_stats") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + RequestConverters.Params params = new RequestConverters.Params(request); + if (getJobStatsRequest.isAllowNoJobs() != null) { + params.putParam("allow_no_jobs", Boolean.toString(getJobStatsRequest.isAllowNoJobs())); + } + return request; + } + + static Request getRecords(GetRecordsRequest getRecordsRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(getRecordsRequest.getJobId()) + .addPathPartAsIs("results") + .addPathPartAsIs("records") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(getRecordsRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index a3e5ba72b773f..ac44f16b80b16 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -19,10 +19,25 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; -import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; -import org.elasticsearch.protocol.xpack.ml.PutJobRequest; -import org.elasticsearch.protocol.xpack.ml.PutJobResponse; +import 
org.elasticsearch.client.ml.FlushJobRequest; +import org.elasticsearch.client.ml.FlushJobResponse; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetJobStatsResponse; +import org.elasticsearch.client.ml.job.stats.JobStats; +import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteJobRequest; +import org.elasticsearch.client.ml.DeleteJobResponse; +import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetJobRequest; +import org.elasticsearch.client.ml.GetJobResponse; +import org.elasticsearch.client.ml.GetRecordsRequest; +import org.elasticsearch.client.ml.GetRecordsResponse; +import org.elasticsearch.client.ml.OpenJobRequest; +import org.elasticsearch.client.ml.OpenJobResponse; +import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.PutJobResponse; import java.io.IOException; import java.util.Collections; @@ -48,14 +63,14 @@ public final class MachineLearningClient { * For additional info * see ML PUT job documentation * - * @param request the PutJobRequest containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings + * @param request The PutJobRequest containing the {@link org.elasticsearch.client.ml.job.config.Job} settings * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return PutJobResponse with enclosed {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} object + * @return PutJobResponse with enclosed {@link org.elasticsearch.client.ml.job.config.Job} object * @throws IOException when there is a serialization issue sending the request or receiving the response */ public PutJobResponse putJob(PutJobRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(request, - RequestConverters::putMachineLearningJob, + MLRequestConverters::putJob, options, PutJobResponse::fromXContent, Collections.emptySet()); @@ -67,19 +82,98 @@ public PutJobResponse putJob(PutJobRequest request, RequestOptions options) thro * For additional info * see ML PUT job documentation * - * @param request the request containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings + * @param request The request containing the {@link org.elasticsearch.client.ml.job.config.Job} settings * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion */ public void putJobAsync(PutJobRequest request, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, - RequestConverters::putMachineLearningJob, + MLRequestConverters::putJob, options, PutJobResponse::fromXContent, listener, Collections.emptySet()); } + /** + * Gets one or more Machine Learning job configuration info. + * + *

+ * For additional info + * see ML GET job documentation + *
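+ * An illustrative sketch only; the {@code GetJobRequest} constructor and the {@code machineLearning()}
+ * accessor used below are assumptions, not shown in this change, and {@code client} is an existing
+ * {@link RestHighLevelClient}:
+ * <pre>{@code
+ * GetJobRequest request = new GetJobRequest("my-job"); // assumed job-id based constructor
+ * GetJobResponse response = client.machineLearning().getJob(request, RequestOptions.DEFAULT);
+ * }</pre>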

+ * @param request {@link GetJobRequest} Request containing a list of jobId(s) and additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return {@link GetJobResponse} response object containing + * the {@link org.elasticsearch.client.ml.job.config.Job} objects and the number of jobs found + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public GetJobResponse getJob(GetJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getJob, + options, + GetJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets one or more Machine Learning job configuration info, asynchronously. + * + *

+ * For additional info + * see ML GET job documentation + *

+ * @param request {@link GetJobRequest} Request containing a list of jobId(s) and additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified with {@link GetJobResponse} upon request completion + */ + public void getJobAsync(GetJobRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getJob, + options, + GetJobResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Deletes the given Machine Learning Job + *

+ * For additional info + * see ML Delete Job documentation + *
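+ * An illustrative sketch only; the {@code machineLearning()} accessor is assumed here, and
+ * {@code client} is an existing {@link RestHighLevelClient}:
+ * <pre>{@code
+ * DeleteJobRequest request = new DeleteJobRequest("my-job");
+ * request.setForce(false); // when true, forcefully deletes an opened job
+ * DeleteJobResponse response = client.machineLearning().deleteJob(request, RequestOptions.DEFAULT);
+ * boolean acknowledged = response.isAcknowledged();
+ * }</pre>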

+ * @param request The request to delete the job + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return action acknowledgement + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public DeleteJobResponse deleteJob(DeleteJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::deleteJob, + options, + DeleteJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Deletes the given Machine Learning Job asynchronously and notifies the listener on completion + *

+ * For additional info + * see ML Delete Job documentation + *

+ * @param request The request to delete the job + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void deleteJobAsync(DeleteJobRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::deleteJob, + options, + DeleteJobResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Opens a Machine Learning Job. * When you open a new job, it starts with an empty model. @@ -91,14 +185,14 @@ public void putJobAsync(PutJobRequest request, RequestOptions options, ActionLis * For additional info * see *
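+ * An illustrative sketch only; the {@code OpenJobRequest} constructor and the {@code machineLearning()}
+ * accessor are assumptions, not shown in this change:
+ * <pre>{@code
+ * OpenJobRequest request = new OpenJobRequest("my-job"); // assumed job-id based constructor
+ * OpenJobResponse response = client.machineLearning().openJob(request, RequestOptions.DEFAULT);
+ * }</pre>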

- * @param request request containing job_id and additional optional options + * @param request Request containing job_id and additional optional options * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return response containing if the job was successfully opened or not. * @throws IOException when there is a serialization issue sending the request or receiving the response */ public OpenJobResponse openJob(OpenJobRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(request, - RequestConverters::machineLearningOpenJob, + MLRequestConverters::openJob, options, OpenJobResponse::fromXContent, Collections.emptySet()); @@ -114,16 +208,219 @@ public OpenJobResponse openJob(OpenJobRequest request, RequestOptions options) t * For additional info * see *

- * @param request request containing job_id and additional optional options + * @param request Request containing job_id and additional optional options * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion */ public void openJobAsync(OpenJobRequest request, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, - RequestConverters::machineLearningOpenJob, + MLRequestConverters::openJob, options, OpenJobResponse::fromXContent, listener, Collections.emptySet()); } + + /** + * Closes one or more Machine Learning Jobs. A job can be opened and closed multiple times throughout its lifecycle. + * + * A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. + * + * @param request Request containing job_ids and additional options. See {@link CloseJobRequest} + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return response containing if the job was successfully closed or not. + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public CloseJobResponse closeJob(CloseJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::closeJob, + options, + CloseJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Closes one or more Machine Learning Jobs asynchronously, notifies listener on completion + * + * A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. + * + * @param request Request containing job_ids and additional options. See {@link CloseJobRequest} + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void closeJobAsync(CloseJobRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::closeJob, + options, + CloseJobResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Gets the buckets for a Machine Learning Job. + *

+ * For additional info + * see ML GET buckets documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public GetBucketsResponse getBuckets(GetBucketsRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getBuckets, + options, + GetBucketsResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets the buckets for a Machine Learning Job, notifies listener once the requested buckets are retrieved. + *

+ * For additional info + * see ML GET buckets documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void getBucketsAsync(GetBucketsRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getBuckets, + options, + GetBucketsResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Flushes internally buffered data for the given Machine Learning Job ensuring all data sent to the has been processed. + * This may cause new results to be calculated depending on the contents of the buffer + * + * Both flush and close operations are similar, + * however the flush is more efficient if you are expecting to send more data for analysis. + * + * When flushing, the job remains open and is available to continue analyzing data. + * A close operation additionally prunes and persists the model state to disk and the + * job must be opened again before analyzing further data. + * + *
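+ * An illustrative sketch only; the {@code FlushJobRequest} constructor and the {@code machineLearning()}
+ * accessor are assumptions, not shown in this change:
+ * <pre>{@code
+ * FlushJobRequest request = new FlushJobRequest("my-job"); // assumed job-id based constructor
+ * FlushJobResponse response = client.machineLearning().flushJob(request, RequestOptions.DEFAULT);
+ * }</pre>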

+ * For additional info + * see Flush ML job documentation + * + * @param request The {@link FlushJobRequest} object enclosing the `jobId` and additional request options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public FlushJobResponse flushJob(FlushJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::flushJob, + options, + FlushJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Flushes internally buffered data for the given Machine Learning Job asynchronously ensuring all data sent to the has been processed. + * This may cause new results to be calculated depending on the contents of the buffer + * + * Both flush and close operations are similar, + * however the flush is more efficient if you are expecting to send more data for analysis. + * + * When flushing, the job remains open and is available to continue analyzing data. + * A close operation additionally prunes and persists the model state to disk and the + * job must be opened again before analyzing further data. + * + *

+ * For additional info + * see Flush ML job documentation + * + * @param request The {@link FlushJobRequest} object enclosing the `jobId` and additional request options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void flushJobAsync(FlushJobRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::flushJob, + options, + FlushJobResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Gets usage statistics for one or more Machine Learning jobs + * + *

+ * For additional info + * see Get Job stats docs + *
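+ * An illustrative sketch only; the {@code GetJobStatsRequest} constructor and the {@code machineLearning()}
+ * accessor are assumptions, not shown in this change:
+ * <pre>{@code
+ * GetJobStatsRequest request = new GetJobStatsRequest("my-job"); // assumed job-id based constructor
+ * GetJobStatsResponse response = client.machineLearning().getJobStats(request, RequestOptions.DEFAULT);
+ * }</pre>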

+ * @param request {@link GetJobStatsRequest} Request containing a list of jobId(s) and additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return {@link GetJobStatsResponse} response object containing + * the {@link JobStats} objects and the number of jobs found + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public GetJobStatsResponse getJobStats(GetJobStatsRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getJobStats, + options, + GetJobStatsResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets one or more Machine Learning job configuration info, asynchronously. + * + *

+ * For additional info + * see Get Job stats docs + *

+ * @param request {@link GetJobStatsRequest} Request containing a list of jobId(s) and additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified with {@link GetJobStatsResponse} upon request completion + */ + public void getJobStatsAsync(GetJobStatsRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getJobStats, + options, + GetJobStatsResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Gets the records for a Machine Learning Job. + *

+ * For additional info + * see ML GET records documentation + * + * @param request the request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public GetRecordsResponse getRecords(GetRecordsRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getRecords, + options, + GetRecordsResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets the records for a Machine Learning Job, notifies listener once the requested records are retrieved. + *

+ * For additional info + * see ML GET records documentation + * + * @param request the request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void getRecordsAsync(GetRecordsRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getRecords, + options, + GetRecordsResponse::fromXContent, + listener, + Collections.emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 973c0ce126d37..5e74262fa20e5 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -88,6 +88,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.security.RefreshPolicy; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; @@ -106,16 +107,18 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.rankeval.RankEvalRequest; +import org.elasticsearch.index.reindex.AbstractBulkByScrollRequest; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; import org.elasticsearch.protocol.xpack.license.PutLicenseRequest; import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; -import org.elasticsearch.protocol.xpack.ml.PutJobRequest; import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.mustache.MultiSearchTemplateRequest; import org.elasticsearch.script.mustache.SearchTemplateRequest; @@ -821,6 +824,48 @@ static Request clusterHealth(ClusterHealthRequest healthRequest) { return request; } + static Request reindex(ReindexRequest reindexRequest) throws IOException { + String endpoint = new EndpointBuilder().addPathPart("_reindex").build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + Params params = new Params(request) + .withRefresh(reindexRequest.isRefresh()) + .withTimeout(reindexRequest.getTimeout()) + .withWaitForActiveShards(reindexRequest.getWaitForActiveShards()); + + if (reindexRequest.getScrollTime() != null) { + params.putParam("scroll", reindexRequest.getScrollTime()); + } + request.setEntity(createEntity(reindexRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request updateByQuery(UpdateByQueryRequest updateByQueryRequest) throws IOException { + String endpoint = + 
endpoint(updateByQueryRequest.indices(), updateByQueryRequest.getDocTypes(), "_update_by_query"); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + Params params = new Params(request) + .withRouting(updateByQueryRequest.getRouting()) + .withPipeline(updateByQueryRequest.getPipeline()) + .withRefresh(updateByQueryRequest.isRefresh()) + .withTimeout(updateByQueryRequest.getTimeout()) + .withWaitForActiveShards(updateByQueryRequest.getWaitForActiveShards()) + .withIndicesOptions(updateByQueryRequest.indicesOptions()); + if (updateByQueryRequest.isAbortOnVersionConflict() == false) { + params.putParam("conflicts", "proceed"); + } + if (updateByQueryRequest.getBatchSize() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_SIZE) { + params.putParam("scroll_size", Integer.toString(updateByQueryRequest.getBatchSize())); + } + if (updateByQueryRequest.getScrollTime() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT) { + params.putParam("scroll", updateByQueryRequest.getScrollTime()); + } + if (updateByQueryRequest.getSize() > 0) { + params.putParam("size", Integer.toString(updateByQueryRequest.getSize())); + } + request.setEntity(createEntity(updateByQueryRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request rollover(RolloverRequest rolloverRequest) throws IOException { String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover") .addPathPart(rolloverRequest.getNewIndexName()).build(); @@ -1126,6 +1171,13 @@ static Request xPackInfo(XPackInfoRequest infoRequest) { return request; } + static Request xPackGraphExplore(GraphExploreRequest exploreRequest) throws IOException { + String endpoint = endpoint(exploreRequest.indices(), exploreRequest.types(), "_xpack/graph/_explore"); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(exploreRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request xPackWatcherPutWatch(PutWatchRequest putWatchRequest) { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") @@ -1199,31 +1251,6 @@ static Request deleteLicense(DeleteLicenseRequest deleteLicenseRequest) { return request; } - static Request putMachineLearningJob(PutJobRequest putJobRequest) throws IOException { - String endpoint = new EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("ml") - .addPathPartAsIs("anomaly_detectors") - .addPathPart(putJobRequest.getJob().getId()) - .build(); - Request request = new Request(HttpPut.METHOD_NAME, endpoint); - request.setEntity(createEntity(putJobRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request machineLearningOpenJob(OpenJobRequest openJobRequest) throws IOException { - String endpoint = new EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("ml") - .addPathPartAsIs("anomaly_detectors") - .addPathPart(openJobRequest.getJobId()) - .addPathPartAsIs("_open") - .build(); - Request request = new Request(HttpPost.METHOD_NAME, endpoint); - request.setJsonEntity(openJobRequest.toString()); - return request; - } - static Request getMigrationAssistance(IndexUpgradeInfoRequest indexUpgradeInfoRequest) { EndpointBuilder endpointBuilder = new EndpointBuilder() .addPathPartAsIs("_xpack/migration/assistance") @@ -1235,7 +1262,7 @@ static Request getMigrationAssistance(IndexUpgradeInfoRequest indexUpgradeInfoRe return request; } - private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { + static HttpEntity 
createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef(); return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); } @@ -1356,11 +1383,16 @@ Params withRealtime(boolean realtime) { Params withRefresh(boolean refresh) { if (refresh) { - return withRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + return withRefreshPolicy(RefreshPolicy.IMMEDIATE); } return this; } + /** + * @deprecated If creating a new HLRC ReST API call, use {@link RefreshPolicy} + * instead of {@link WriteRequest.RefreshPolicy} from the server project + */ + @Deprecated Params withRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) { if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) { return putParam("refresh", refreshPolicy.getValue()); @@ -1368,6 +1400,13 @@ Params withRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) { return this; } + Params withRefreshPolicy(RefreshPolicy refreshPolicy) { + if (refreshPolicy != RefreshPolicy.NONE) { + return putParam("refresh", refreshPolicy.getValue()); + } + return this; + } + Params withRetryOnConflict(int retryOnConflict) { if (retryOnConflict > 0) { return putParam("retry_on_conflict", String.valueOf(retryOnConflict)); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index e705ca12806ba..6e3c5a6fb831e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -19,7 +19,6 @@ package org.elasticsearch.client; -import org.apache.http.Header; import org.apache.http.HttpEntity; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; @@ -65,6 +64,9 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.rankeval.RankEvalRequest; import org.elasticsearch.index.rankeval.RankEvalResponse; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.plugins.spi.NamedXContentProvider; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestStatus; @@ -177,6 +179,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.ServiceLoader; import java.util.Set; import java.util.function.Function; @@ -209,6 +212,7 @@ public class RestHighLevelClient implements Closeable { private final TasksClient tasksClient = new TasksClient(this); private final XPackClient xPackClient = new XPackClient(this); private final WatcherClient watcherClient = new WatcherClient(this); + private final GraphClient graphClient = new GraphClient(this); private final LicenseClient licenseClient = new LicenseClient(this); private final MigrationClient migrationClient = new MigrationClient(this); private final MachineLearningClient machineLearningClient = new MachineLearningClient(this); @@ -324,6 +328,16 @@ public final XPackClient xpack() { * Watcher APIs on elastic.co for more information. 
*/ public WatcherClient watcher() { return watcherClient; } + + /** + * Provides methods for accessing the Elastic Licensed Graph explore API that + * is shipped with the default distribution of Elasticsearch. All of + * these APIs will 404 if run against the OSS distribution of Elasticsearch. + *
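+ * An illustrative sketch only; the {@code GraphExploreRequest} construction below is an assumption,
+ * not shown in this change, and {@code client} is an existing {@link RestHighLevelClient}:
+ * <pre>{@code
+ * GraphExploreRequest request = new GraphExploreRequest(); // configure indices, hops, and vertices as needed
+ * GraphExploreResponse response = client.graph().explore(request, RequestOptions.DEFAULT);
+ * }</pre>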

+ * See the + * Graph API on elastic.co for more information. + */ + public GraphClient graph() { return graphClient; } /** * Provides methods for accessing the Elastic Licensed Licensing APIs that @@ -384,6 +398,62 @@ public final void bulkAsync(BulkRequest bulkRequest, RequestOptions options, Act performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, listener, emptySet()); } + /** + * Executes a reindex request. + * See Reindex API on elastic.co + * @param reindexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final BulkByScrollResponse reindex(ReindexRequest reindexRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + reindexRequest, RequestConverters::reindex, options, BulkByScrollResponse::fromXContent, emptySet() + ); + } + + /** + * Asynchronously executes a reindex request. + * See Reindex API on elastic.co + * @param reindexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void reindexAsync(ReindexRequest reindexRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity( + reindexRequest, RequestConverters::reindex, options, BulkByScrollResponse::fromXContent, listener, emptySet() + ); + } + + /** + * Executes a update by query request. + * See + * Update By Query API on elastic.co + * @param updateByQueryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final BulkByScrollResponse updateByQuery(UpdateByQueryRequest updateByQueryRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, emptySet() + ); + } + + /** + * Asynchronously executes an update by query request. + * See + * Update By Query API on elastic.co + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void updateByQueryAsync(UpdateByQueryRequest reindexRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity( + reindexRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, listener, emptySet() + ); + } + /** * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -949,6 +1019,11 @@ public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesReque FieldCapabilitiesResponse::fromXContent, listener, emptySet()); } + /** + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. 
The Validation + * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + */ + @Deprecated protected final Resp performRequestAndParseEntity(Req request, CheckedFunction requestConverter, RequestOptions options, @@ -958,15 +1033,58 @@ protected final Resp performRequestAndParseEnt response -> parseEntity(response.getEntity(), entityParser), ignores); } + /** + * Defines a helper method for performing a request and then parsing the returned entity using the provided entityParser. + */ + protected final Resp performRequestAndParseEntity(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + Set ignores) throws IOException { + return performRequest(request, requestConverter, options, + response -> parseEntity(response.getEntity(), entityParser), ignores); + } + + /** + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation + * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + */ + @Deprecated protected final Resp performRequest(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - Set ignores) throws IOException { + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + Set ignores) throws IOException { ActionRequestValidationException validationException = request.validate(); - if (validationException != null) { + if (validationException != null && validationException.validationErrors().isEmpty() == false) { throw validationException; } + return internalPerformRequest(request, requestConverter, options, responseConverter, ignores); + } + + /** + * Defines a helper method for performing a request. + */ + protected final Resp performRequest(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + Set ignores) throws IOException { + Optional validationException = request.validate(); + if (validationException != null && validationException.isPresent()) { + throw validationException.get(); + } + return internalPerformRequest(request, requestConverter, options, responseConverter, ignores); + } + + /** + * Provides common functionality for performing a request. + */ + private Resp internalPerformRequest(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + Set ignores) throws IOException { Request req = requestConverter.apply(request); req.setOptions(options); Response response; @@ -994,25 +1112,75 @@ protected final Resp performRequest(Req reques } } + /** + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation + * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. 
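+ * A hypothetical request following that guidance might implement its own validation like this
+ * (the class and field names below are illustrative only, not part of this change):
+ * <pre>{@code
+ * public class ExampleRequest implements Validatable {
+ *     private final String id;
+ *
+ *     public ExampleRequest(String id) {
+ *         this.id = id;
+ *     }
+ *
+ *     public Optional<ValidationException> validate() {
+ *         if (id == null || id.isEmpty()) {
+ *             ValidationException exception = new ValidationException();
+ *             exception.addValidationError("[id] must not be null or empty");
+ *             return Optional.of(exception);
+ *         }
+ *         return Optional.empty();
+ *     }
+ * }
+ * }</pre>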
+ */ + @Deprecated protected final void performRequestAsyncAndParseEntity(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction entityParser, - ActionListener listener, Set ignores) { + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + ActionListener listener, Set ignores) { + performRequestAsync(request, requestConverter, options, + response -> parseEntity(response.getEntity(), entityParser), listener, ignores); + } + + /** + * Defines a helper method for asynchronously performing a request. + */ + protected final void performRequestAsyncAndParseEntity(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + ActionListener listener, Set ignores) { performRequestAsync(request, requestConverter, options, response -> parseEntity(response.getEntity(), entityParser), listener, ignores); } + + /** + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation + * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + */ + @Deprecated protected final void performRequestAsync(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - ActionListener listener, Set ignores) { + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + ActionListener listener, Set ignores) { ActionRequestValidationException validationException = request.validate(); - if (validationException != null) { + if (validationException != null && validationException.validationErrors().isEmpty() == false) { listener.onFailure(validationException); return; } + internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); + } + + /** + * Defines a helper method for asynchronously performing a request. + */ + protected final void performRequestAsync(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + ActionListener listener, Set ignores) { + Optional validationException = request.validate(); + if (validationException != null && validationException.isPresent()) { + listener.onFailure(validationException.get()); + return; + } + internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); + } + + /** + * Provides common functionality for asynchronously performing a request. 
+ */ + private void internalPerformRequestAsync(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + ActionListener listener, Set ignores) { Request req; try { req = requestConverter.apply(request); @@ -1026,6 +1194,7 @@ protected final void performRequestAsync(Req r client.performRequestAsync(req, responseListener); } + final ResponseListener wrapResponseListener(CheckedFunction responseConverter, ActionListener actionListener, Set ignores) { return new ResponseListener() { @@ -1108,15 +1277,6 @@ protected final Resp parseEntity(final HttpEntity entity, } } - private static RequestOptions optionsForHeaders(Header[] headers) { - RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); - for (Header header : headers) { - Objects.requireNonNull(header, "header cannot be null"); - options.addHeader(header.getName(), header.getValue()); - } - return options.build(); - } - static boolean convertExistsResponse(Response response) { return response.getStatusLine().getStatusCode() == 200; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java new file mode 100644 index 0000000000000..af8fbe3e72b37 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client; + +import org.elasticsearch.common.unit.TimeValue; + +/** + * A base request for any requests that supply timeouts. + * + * Please note, any requests that use a ackTimeout should set timeout as they + * represent the same backing field on the server. + */ +public class TimedRequest implements Validatable { + + private TimeValue timeout; + private TimeValue masterTimeout; + + public void setTimeout(TimeValue timeout) { + this.timeout = timeout; + + } + + public void setMasterTimeout(TimeValue masterTimeout) { + this.masterTimeout = masterTimeout; + } + + /** + * Returns the request timeout + */ + public TimeValue timeout() { + return timeout; + } + + /** + * Returns the timeout for the request to be completed on the master node + */ + public TimeValue masterNodeTimeout() { + return masterTimeout; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java new file mode 100644 index 0000000000000..fe4a1fc42cb3b --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client; + +import java.util.Optional; + +/** + * Defines a validation layer for Requests. + */ +public interface Validatable { + /** + * Perform validation. This method does not have to be overridden in the event that no validation needs to be done, + * or the validation was done during object construction time. A {@link ValidationException} that is not null is + * assumed to contain validation errors and will be thrown. + * + * @return An {@link Optional} {@link ValidationException} that contains a list of validation errors. + */ + default Optional validate() { + return Optional.empty(); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java new file mode 100644 index 0000000000000..6b5d738d67565 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client; + +import java.util.ArrayList; +import java.util.List; + +/** + * Encapsulates an accumulation of validation errors + */ +public class ValidationException extends IllegalArgumentException { + private final List validationErrors = new ArrayList<>(); + + /** + * Add a new validation error to the accumulating validation errors + * @param error the error to add + */ + public void addValidationError(String error) { + validationErrors.add(error); + } + + /** + * Returns the validation errors accumulated + */ + public final List validationErrors() { + return validationErrors; + } + + @Override + public final String getMessage() { + StringBuilder sb = new StringBuilder(); + sb.append("Validation Failed: "); + int index = 0; + for (String error : validationErrors) { + sb.append(++index).append(": ").append(error).append(";"); + } + return sb.toString(); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/AbstractResultResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/AbstractResultResponse.java new file mode 100644 index 0000000000000..1b609797dd6fe --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/AbstractResultResponse.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * Abstract class that provides a list of results and their count. 
+ */ +public abstract class AbstractResultResponse extends ActionResponse implements ToXContentObject { + + public static final ParseField COUNT = new ParseField("count"); + + private final ParseField resultsField; + protected final List results; + protected final long count; + + AbstractResultResponse(ParseField resultsField, List results, long count) { + this.resultsField = Objects.requireNonNull(resultsField, + "[results_field] must not be null"); + this.results = Collections.unmodifiableList(results); + this.count = count; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(COUNT.getPreferredName(), count); + builder.field(resultsField.getPreferredName(), results); + builder.endObject(); + return builder; + } + + public long count() { + return count; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/CloseJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/CloseJobRequest.java new file mode 100644 index 0000000000000..19f3df8e4320f --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/CloseJobRequest.java @@ -0,0 +1,195 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.security.InvalidParameterException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * Request to close Machine Learning Jobs + */ +public class CloseJobRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField JOB_ID = new ParseField("job_id"); + public static final ParseField TIMEOUT = new ParseField("timeout"); + public static final ParseField FORCE = new ParseField("force"); + public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "close_job_request", + true, a -> new CloseJobRequest((List) a[0])); + + static { + PARSER.declareField(ConstructingObjectParser.constructorArg(), + p -> Arrays.asList(Strings.commaDelimitedListToStringArray(p.text())), + JOB_ID, ObjectParser.ValueType.STRING_ARRAY); + PARSER.declareString((obj, val) -> obj.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT); + PARSER.declareBoolean(CloseJobRequest::setForce, FORCE); + PARSER.declareBoolean(CloseJobRequest::setAllowNoJobs, ALLOW_NO_JOBS); + } + + private static final String ALL_JOBS = "_all"; + + private final List jobIds; + private TimeValue timeout; + private Boolean force; + private Boolean allowNoJobs; + + /** + * Explicitly close all jobs + * + * @return a {@link CloseJobRequest} for all existing jobs + */ + public static CloseJobRequest closeAllJobsRequest(){ + return new CloseJobRequest(ALL_JOBS); + } + + CloseJobRequest(List jobIds) { + if (jobIds.isEmpty()) { + throw new InvalidParameterException("jobIds must not be empty"); + } + if (jobIds.stream().anyMatch(Objects::isNull)) { + throw new NullPointerException("jobIds must not contain null values"); + } + this.jobIds = new ArrayList<>(jobIds); + } + + /** + * Close the specified Jobs via their unique jobIds + * + * @param jobIds must be non-null and non-empty and each jobId must be non-null + */ + public CloseJobRequest(String... jobIds) { + this(Arrays.asList(jobIds)); + } + + /** + * All the jobIds to be closed + */ + public List getJobIds() { + return jobIds; + } + + public TimeValue getTimeout() { + return timeout; + } + + /** + * How long to wait for the close request to complete before timing out. + * + * @param timeout Default value: 30 minutes + */ + public void setTimeout(TimeValue timeout) { + this.timeout = timeout; + } + + public Boolean isForce() { + return force; + } + + /** + * Should the closing be forced. + * + * Use to close a failed job, or to forcefully close a job which has not responded to its initial close request. + * + * @param force When {@code true} forcefully close the job. 
Defaults to {@code false} + */ + public void setForce(boolean force) { + this.force = force; + } + + public Boolean isAllowNoJobs() { + return this.allowNoJobs; + } + + /** + * Whether to ignore if a wildcard expression matches no jobs. + * + * This includes `_all` string or when no jobs have been specified + * + * @param allowNoJobs When {@code true} ignore if wildcard or `_all` matches no jobs. Defaults to {@code true} + */ + public void setAllowNoJobs(boolean allowNoJobs) { + this.allowNoJobs = allowNoJobs; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return Objects.hash(jobIds, timeout, force, allowNoJobs); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + CloseJobRequest that = (CloseJobRequest) other; + return Objects.equals(jobIds, that.jobIds) && + Objects.equals(timeout, that.timeout) && + Objects.equals(force, that.force) && + Objects.equals(allowNoJobs, that.allowNoJobs); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(JOB_ID.getPreferredName(), Strings.collectionToCommaDelimitedString(jobIds)); + if (timeout != null) { + builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep()); + } + if (force != null) { + builder.field(FORCE.getPreferredName(), force); + } + if (allowNoJobs != null) { + builder.field(ALLOW_NO_JOBS.getPreferredName(), allowNoJobs); + } + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/CloseJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/CloseJobResponse.java new file mode 100644 index 0000000000000..2ac1e0faee34f --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/CloseJobResponse.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Response indicating if the Job(s) closed or not + */ +public class CloseJobResponse extends ActionResponse implements ToXContentObject { + + private static final ParseField CLOSED = new ParseField("closed"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("close_job_response", true, (a) -> new CloseJobResponse((Boolean)a[0])); + + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), CLOSED); + } + + private boolean closed; + + public CloseJobResponse(boolean closed) { + this.closed = closed; + } + + public static CloseJobResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + /** + * Has the job closed or not + * @return boolean value indicating the job closed status + */ + public boolean isClosed() { + return closed; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + CloseJobResponse that = (CloseJobResponse) other; + return isClosed() == that.isClosed(); + } + + @Override + public int hashCode() { + return Objects.hash(isClosed()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(CLOSED.getPreferredName(), closed); + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteJobRequest.java new file mode 100644 index 0000000000000..a355f7ec659bb --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteJobRequest.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; + +import java.util.Objects; + +/** + * Request to delete a Machine Learning Job via its ID + */ +public class DeleteJobRequest extends ActionRequest { + + private String jobId; + private boolean force; + + public DeleteJobRequest(String jobId) { + this.jobId = Objects.requireNonNull(jobId, "[job_id] must not be null"); + } + + public String getJobId() { + return jobId; + } + + /** + * The jobId which to delete + * @param jobId unique jobId to delete, must not be null + */ + public void setJobId(String jobId) { + this.jobId = Objects.requireNonNull(jobId, "[job_id] must not be null"); + } + + public boolean isForce() { + return force; + } + + /** + * Used to forcefully delete an opened job. + * This method is quicker than closing and deleting the job. + * + * @param force When {@code true} forcefully delete an opened job. Defaults to {@code false} + */ + public void setForce(boolean force) { + this.force = force; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, force); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || obj.getClass() != getClass()) { + return false; + } + + DeleteJobRequest other = (DeleteJobRequest) obj; + return Objects.equals(jobId, other.jobId) && Objects.equals(force, other.force); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteJobResponse.java new file mode 100644 index 0000000000000..86cafd9e09315 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteJobResponse.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
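// Illustrative usage sketch, not part of this patch: deleting a job with the DeleteJobRequest above.
// The high-level client call is an assumption and is shown commented out.
DeleteJobRequest deleteRequest = new DeleteJobRequest("my-job"); // jobId must not be null
deleteRequest.setForce(true);                                    // delete even if the job is open
// DeleteJobResponse deleteResponse = client.machineLearning().deleteJob(deleteRequest, RequestOptions.DEFAULT);
// boolean acknowledged = deleteResponse.isAcknowledged();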
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Response acknowledging the Machine Learning Job request + */ +public class DeleteJobResponse extends AcknowledgedResponse { + + public DeleteJobResponse(boolean acknowledged) { + super(acknowledged); + } + + public DeleteJobResponse() { + } + + public static DeleteJobResponse fromXContent(XContentParser parser) throws IOException { + AcknowledgedResponse response = AcknowledgedResponse.fromXContent(parser); + return new DeleteJobResponse(response.isAcknowledged()); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DeleteJobResponse that = (DeleteJobResponse) other; + return isAcknowledged() == that.isAcknowledged(); + } + + @Override + public int hashCode() { + return Objects.hash(isAcknowledged()); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobRequest.java new file mode 100644 index 0000000000000..067851d452666 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobRequest.java @@ -0,0 +1,195 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Request object to flush a given Machine Learning job. 
+ */ +public class FlushJobRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField CALC_INTERIM = new ParseField("calc_interim"); + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField ADVANCE_TIME = new ParseField("advance_time"); + public static final ParseField SKIP_TIME = new ParseField("skip_time"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("flush_job_request", (a) -> new FlushJobRequest((String) a[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareBoolean(FlushJobRequest::setCalcInterim, CALC_INTERIM); + PARSER.declareString(FlushJobRequest::setStart, START); + PARSER.declareString(FlushJobRequest::setEnd, END); + PARSER.declareString(FlushJobRequest::setAdvanceTime, ADVANCE_TIME); + PARSER.declareString(FlushJobRequest::setSkipTime, SKIP_TIME); + } + + private final String jobId; + private Boolean calcInterim; + private String start; + private String end; + private String advanceTime; + private String skipTime; + + /** + * Create new Flush job request + * + * @param jobId The job ID of the job to flush + */ + public FlushJobRequest(String jobId) { + this.jobId = jobId; + } + + public String getJobId() { + return jobId; + } + + public boolean getCalcInterim() { + return calcInterim; + } + + /** + * When {@code true} calculates the interim results for the most recent bucket or all buckets within the latency period. + * + * @param calcInterim defaults to {@code false}. + */ + public void setCalcInterim(boolean calcInterim) { + this.calcInterim = calcInterim; + } + + public String getStart() { + return start; + } + + /** + * When used in conjunction with {@link FlushJobRequest#calcInterim}, + * specifies the start of the range of buckets on which to calculate interim results. + * + * @param start the beginning of the range of buckets; may be an epoch seconds, epoch millis or an ISO string + */ + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + /** + * When used in conjunction with {@link FlushJobRequest#calcInterim}, specifies the end of the range + * of buckets on which to calculate interim results + * + * @param end the end of the range of buckets; may be an epoch seconds, epoch millis or an ISO string + */ + public void setEnd(String end) { + this.end = end; + } + + public String getAdvanceTime() { + return advanceTime; + } + + /** + * Specifies to advance to a particular time value. + * Results are generated and the model is updated for data from the specified time interval. + * + * @param advanceTime String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setAdvanceTime(String advanceTime) { + this.advanceTime = advanceTime; + } + + public String getSkipTime() { + return skipTime; + } + + /** + * Specifies to skip to a particular time value. + * Results are not generated and the model is not updated for data from the specified time interval. 
+ * + * @param skipTime String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setSkipTime(String skipTime) { + this.skipTime = skipTime; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, calcInterim, start, end, advanceTime, skipTime); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + FlushJobRequest other = (FlushJobRequest) obj; + return Objects.equals(jobId, other.jobId) && + calcInterim == other.calcInterim && + Objects.equals(start, other.start) && + Objects.equals(end, other.end) && + Objects.equals(advanceTime, other.advanceTime) && + Objects.equals(skipTime, other.skipTime); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (calcInterim != null) { + builder.field(CALC_INTERIM.getPreferredName(), calcInterim); + } + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (advanceTime != null) { + builder.field(ADVANCE_TIME.getPreferredName(), advanceTime); + } + if (skipTime != null) { + builder.field(SKIP_TIME.getPreferredName(), skipTime); + } + builder.endObject(); + return builder; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobResponse.java new file mode 100644 index 0000000000000..048b07b504ae0 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobResponse.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
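// Illustrative usage sketch, not part of this patch: flushing a job with the FlushJobRequest above.
// Per the setters' javadoc, timestamps may be epoch seconds, epoch millis or ISO strings; the
// commented-out client entry point is an assumption, not something this diff adds.
FlushJobRequest flushRequest = new FlushJobRequest("my-job");
flushRequest.setCalcInterim(true);             // calculate interim results for the flushed buckets
flushRequest.setStart("2018-08-01T00:00:00Z");
flushRequest.setEnd("2018-08-02T00:00:00Z");
// FlushJobResponse flushResponse = client.machineLearning().flushJob(flushRequest, RequestOptions.DEFAULT);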
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +/** + * Response object containing flush acknowledgement and additional data + */ +public class FlushJobResponse extends ActionResponse implements ToXContentObject { + + public static final ParseField FLUSHED = new ParseField("flushed"); + public static final ParseField LAST_FINALIZED_BUCKET_END = new ParseField("last_finalized_bucket_end"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("flush_job_response", + true, + (a) -> { + boolean flushed = (boolean) a[0]; + Date date = a[1] == null ? null : new Date((long) a[1]); + return new FlushJobResponse(flushed, date); + }); + + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), FLUSHED); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), LAST_FINALIZED_BUCKET_END); + } + + public static FlushJobResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + private final boolean flushed; + private final Date lastFinalizedBucketEnd; + + public FlushJobResponse(boolean flushed, @Nullable Date lastFinalizedBucketEnd) { + this.flushed = flushed; + this.lastFinalizedBucketEnd = lastFinalizedBucketEnd; + } + + /** + * Was the job successfully flushed or not + */ + public boolean isFlushed() { + return flushed; + } + + /** + * Provides the timestamp (in milliseconds-since-the-epoch) of the end of the last bucket that was processed. + */ + @Nullable + public Date getLastFinalizedBucketEnd() { + return lastFinalizedBucketEnd; + } + + @Override + public int hashCode() { + return Objects.hash(flushed, lastFinalizedBucketEnd); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + FlushJobResponse that = (FlushJobResponse) other; + return that.flushed == flushed && Objects.equals(lastFinalizedBucketEnd, that.lastFinalizedBucketEnd); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(FLUSHED.getPreferredName(), flushed); + if (lastFinalizedBucketEnd != null) { + builder.timeField(LAST_FINALIZED_BUCKET_END.getPreferredName(), + LAST_FINALIZED_BUCKET_END.getPreferredName() + "_string", lastFinalizedBucketEnd.getTime()); + } + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsRequest.java new file mode 100644 index 0000000000000..f50d92d58dda5 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsRequest.java @@ -0,0 +1,268 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
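// Hypothetical handling of the FlushJobResponse above; the client call producing it is not part of
// this diff. getLastFinalizedBucketEnd() may be null when no bucket has been finalized yet.
if (flushResponse.isFlushed()) {
    Date lastFinalized = flushResponse.getLastFinalizedBucketEnd();
    if (lastFinalized != null) {
        System.out.println("results finalized up to " + lastFinalized);
    }
}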
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.results.Result; +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * A request to retrieve buckets of a given job + */ +public class GetBucketsRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField EXPAND = new ParseField("expand"); + public static final ParseField EXCLUDE_INTERIM = new ParseField("exclude_interim"); + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField ANOMALY_SCORE = new ParseField("anomaly_score"); + public static final ParseField TIMESTAMP = new ParseField("timestamp"); + public static final ParseField SORT = new ParseField("sort"); + public static final ParseField DESCENDING = new ParseField("desc"); + + public static final ObjectParser PARSER = new ObjectParser<>("get_buckets_request", GetBucketsRequest::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareString(GetBucketsRequest::setTimestamp, Result.TIMESTAMP); + PARSER.declareBoolean(GetBucketsRequest::setExpand, EXPAND); + PARSER.declareBoolean(GetBucketsRequest::setExcludeInterim, EXCLUDE_INTERIM); + PARSER.declareStringOrNull(GetBucketsRequest::setStart, START); + PARSER.declareStringOrNull(GetBucketsRequest::setEnd, END); + PARSER.declareObject(GetBucketsRequest::setPageParams, PageParams.PARSER, PageParams.PAGE); + PARSER.declareDouble(GetBucketsRequest::setAnomalyScore, ANOMALY_SCORE); + PARSER.declareString(GetBucketsRequest::setSort, SORT); + PARSER.declareBoolean(GetBucketsRequest::setDescending, DESCENDING); + } + + private String jobId; + private String timestamp; + private Boolean expand; + private Boolean excludeInterim; + private String start; + private String end; + private PageParams pageParams; + private Double anomalyScore; + private String sort; + private Boolean descending; + + private GetBucketsRequest() {} + + /** + * Constructs a request to retrieve buckets of a given job + * @param jobId id of the job to retrieve buckets of + */ + public GetBucketsRequest(String jobId) { + this.jobId = Objects.requireNonNull(jobId); + } + + public String getJobId() { + return jobId; + } + + /** + * Sets the timestamp of a specific bucket to be retrieved. 
+ * @param timestamp the timestamp of a specific bucket to be retrieved + */ + public void setTimestamp(String timestamp) { + this.timestamp = timestamp; + } + + public String getTimestamp() { + return timestamp; + } + + public boolean isExpand() { + return expand; + } + + /** + * Sets the value of "expand". + * When {@code true}, buckets will be expanded to include their records. + * @param expand value of "expand" to be set + */ + public void setExpand(boolean expand) { + this.expand = expand; + } + + public boolean isExcludeInterim() { + return excludeInterim; + } + + /** + * Sets the value of "exclude_interim". + * When {@code true}, interim buckets will be filtered out. + * @param excludeInterim value of "exclude_interim" to be set + */ + public void setExcludeInterim(boolean excludeInterim) { + this.excludeInterim = excludeInterim; + } + + public String getStart() { + return start; + } + + /** + * Sets the value of "start" which is a timestamp. + * Only buckets whose timestamp is on or after the "start" value will be returned. + * @param start value of "start" to be set + */ + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + /** + * Sets the value of "end" which is a timestamp. + * Only buckets whose timestamp is before the "end" value will be returned. + * @param end value of "end" to be set + */ + public void setEnd(String end) { + this.end = end; + } + + public PageParams getPageParams() { + return pageParams; + } + + /** + * Sets the paging parameters + * @param pageParams the paging parameters + */ + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + public Double getAnomalyScore() { + return anomalyScore; + } + + /** + * Sets the value of "anomaly_score". + * Only buckets with "anomaly_score" equal or greater will be returned. + * @param anomalyScore value of "anomaly_score". + */ + public void setAnomalyScore(double anomalyScore) { + this.anomalyScore = anomalyScore; + } + + public String getSort() { + return sort; + } + + /** + * Sets the value of "sort". + * Specifies the bucket field to sort on. + * @param sort value of "sort". + */ + public void setSort(String sort) { + this.sort = sort; + } + + public boolean isDescending() { + return descending; + } + + /** + * Sets the value of "desc". + * Specifies the sorting order. 
+ * @param descending value of "desc" + */ + public void setDescending(boolean descending) { + this.descending = descending; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (timestamp != null) { + builder.field(Result.TIMESTAMP.getPreferredName(), timestamp); + } + if (expand != null) { + builder.field(EXPAND.getPreferredName(), expand); + } + if (excludeInterim != null) { + builder.field(EXCLUDE_INTERIM.getPreferredName(), excludeInterim); + } + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (pageParams != null) { + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + } + if (anomalyScore != null) { + builder.field(ANOMALY_SCORE.getPreferredName(), anomalyScore); + } + if (sort != null) { + builder.field(SORT.getPreferredName(), sort); + } + if (descending != null) { + builder.field(DESCENDING.getPreferredName(), descending); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, timestamp, expand, excludeInterim, anomalyScore, pageParams, start, end, sort, descending); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetBucketsRequest other = (GetBucketsRequest) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(timestamp, other.timestamp) && + Objects.equals(expand, other.expand) && + Objects.equals(excludeInterim, other.excludeInterim) && + Objects.equals(anomalyScore, other.anomalyScore) && + Objects.equals(pageParams, other.pageParams) && + Objects.equals(start, other.start) && + Objects.equals(end, other.end) && + Objects.equals(sort, other.sort) && + Objects.equals(descending, other.descending); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsResponse.java new file mode 100644 index 0000000000000..de8736b86d925 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsResponse.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
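// Illustrative usage sketch, not part of this patch: requesting buckets with the GetBucketsRequest above.
// PageParams is referenced but not shown in this diff; a PageParams(from, size) constructor is assumed.
GetBucketsRequest bucketsRequest = new GetBucketsRequest("my-job");
bucketsRequest.setStart("2018-08-01T00:00:00Z");  // buckets on or after this timestamp
bucketsRequest.setEnd("2018-08-02T00:00:00Z");    // buckets before this timestamp
bucketsRequest.setAnomalyScore(75.0);             // only buckets scoring 75 or higher
bucketsRequest.setSort("anomaly_score");
bucketsRequest.setDescending(true);
bucketsRequest.setPageParams(new PageParams(0, 100)); // assumed (from, size) signature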
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.results.Bucket; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * A response containing the requested buckets + */ +public class GetBucketsResponse extends AbstractResultResponse { + + public static final ParseField BUCKETS = new ParseField("buckets"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("get_buckets_response", + true, a -> new GetBucketsResponse((List) a[0], (long) a[1])); + + static { + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), Bucket.PARSER, BUCKETS); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), COUNT); + } + + public static GetBucketsResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + GetBucketsResponse(List buckets, long count) { + super(BUCKETS, buckets, count); + } + + /** + * The retrieved buckets + * @return the retrieved buckets + */ + public List buckets() { + return results; + } + + @Override + public int hashCode() { + return Objects.hash(count, results); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetBucketsResponse other = (GetBucketsResponse) obj; + return count == other.count && Objects.equals(results, other.results); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobRequest.java new file mode 100644 index 0000000000000..3de7037e5c8f3 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobRequest.java @@ -0,0 +1,144 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * Request object to get {@link Job} objects with the matching `jobId`s or + * `groupName`s. 
+ * + * `_all` explicitly gets all the jobs in the cluster + * An empty request (no `jobId`s) implicitly gets all the jobs in the cluster + */ +public class GetJobRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField JOB_IDS = new ParseField("job_ids"); + public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs"); + + private static final String ALL_JOBS = "_all"; + private final List jobIds; + private Boolean allowNoJobs; + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_job_request", + true, a -> new GetJobRequest(a[0] == null ? new ArrayList<>() : (List) a[0])); + + static { + PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), JOB_IDS); + PARSER.declareBoolean(GetJobRequest::setAllowNoJobs, ALLOW_NO_JOBS); + } + + /** + * Helper method to create a query that will get ALL jobs + * @return new {@link GetJobRequest} object searching for the jobId "_all" + */ + public static GetJobRequest getAllJobsRequest() { + return new GetJobRequest(ALL_JOBS); + } + + /** + * Get the specified {@link Job} configurations via their unique jobIds + * @param jobIds must not contain any null values + */ + public GetJobRequest(String... jobIds) { + this(Arrays.asList(jobIds)); + } + + GetJobRequest(List jobIds) { + if (jobIds.stream().anyMatch(Objects::isNull)) { + throw new NullPointerException("jobIds must not contain null values"); + } + this.jobIds = new ArrayList<>(jobIds); + } + + /** + * All the jobIds for which to get configuration information + */ + public List getJobIds() { + return jobIds; + } + + /** + * Whether to ignore if a wildcard expression matches no jobs. + * + * @param allowNoJobs If this is {@code false}, then an error is returned when a wildcard (or `_all`) does not match any jobs + */ + public void setAllowNoJobs(boolean allowNoJobs) { + this.allowNoJobs = allowNoJobs; + } + + public Boolean isAllowNoJobs() { + return allowNoJobs; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return Objects.hash(jobIds, allowNoJobs); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || other.getClass() != getClass()) { + return false; + } + + GetJobRequest that = (GetJobRequest) other; + return Objects.equals(jobIds, that.jobIds) && + Objects.equals(allowNoJobs, that.allowNoJobs); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (jobIds.isEmpty() == false) { + builder.field(JOB_IDS.getPreferredName(), jobIds); + } + + if (allowNoJobs != null) { + builder.field(ALLOW_NO_JOBS.getPreferredName(), allowNoJobs); + } + + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobResponse.java new file mode 100644 index 0000000000000..0cdf08c6c24a4 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobResponse.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
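// Illustrative usage sketch, not part of this patch: fetching job configurations with the GetJobRequest above.
GetJobRequest allJobs = GetJobRequest.getAllJobsRequest();          // equivalent to asking for "_all"
allJobs.setAllowNoJobs(true);                                       // do not error if nothing matches
GetJobRequest someJobs = new GetJobRequest("job-1", "job-group-*"); // specific ids or group/wildcard expressions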
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Contains a {@link List} of the found {@link Job} objects and the total count found + */ +public class GetJobResponse extends AbstractResultResponse { + + public static final ParseField RESULTS_FIELD = new ParseField("jobs"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("jobs_response", true, + a -> new GetJobResponse((List) a[0], (long) a[1])); + + static { + PARSER.declareObjectArray(constructorArg(), Job.PARSER, RESULTS_FIELD); + PARSER.declareLong(constructorArg(), AbstractResultResponse.COUNT); + } + + GetJobResponse(List jobBuilders, long count) { + super(RESULTS_FIELD, jobBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()), count); + } + + /** + * The collection of {@link Job} objects found in the query + */ + public List jobs() { + return results; + } + + public static GetJobResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public int hashCode() { + return Objects.hash(results, count); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + GetJobResponse other = (GetJobResponse) obj; + return Objects.equals(results, other.results) && count == other.count; + } + + @Override + public final String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsRequest.java new file mode 100644 index 0000000000000..d8eb350755dcb --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsRequest.java @@ -0,0 +1,146 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + + +/** + * Request object to get {@link org.elasticsearch.client.ml.job.stats.JobStats} by their respective jobIds + * + * `_all` explicitly gets all the jobs' statistics in the cluster + * An empty request (no `jobId`s) implicitly gets all the jobs' statistics in the cluster + */ +public class GetJobStatsRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_jobs_stats_request", a -> new GetJobStatsRequest((List) a[0])); + + static { + PARSER.declareField(ConstructingObjectParser.constructorArg(), + p -> Arrays.asList(Strings.commaDelimitedListToStringArray(p.text())), + Job.ID, ObjectParser.ValueType.STRING_ARRAY); + PARSER.declareBoolean(GetJobStatsRequest::setAllowNoJobs, ALLOW_NO_JOBS); + } + + private static final String ALL_JOBS = "_all"; + + private final List jobIds; + private Boolean allowNoJobs; + + /** + * Explicitly gets all jobs statistics + * + * @return a {@link GetJobStatsRequest} for all existing jobs + */ + public static GetJobStatsRequest getAllJobStatsRequest(){ + return new GetJobStatsRequest(ALL_JOBS); + } + + GetJobStatsRequest(List jobIds) { + if (jobIds.stream().anyMatch(Objects::isNull)) { + throw new NullPointerException("jobIds must not contain null values"); + } + this.jobIds = new ArrayList<>(jobIds); + } + + /** + * Get the specified Job's statistics via their unique jobIds + * + * @param jobIds must be non-null and each jobId must be non-null + */ + public GetJobStatsRequest(String... jobIds) { + this(Arrays.asList(jobIds)); + } + + /** + * All the jobIds for which to get statistics + */ + public List getJobIds() { + return jobIds; + } + + public Boolean isAllowNoJobs() { + return this.allowNoJobs; + } + + /** + * Whether to ignore if a wildcard expression matches no jobs. + * + * This includes `_all` string or when no jobs have been specified + * + * @param allowNoJobs When {@code true} ignore if wildcard or `_all` matches no jobs. 
Defaults to {@code true} + */ + public void setAllowNoJobs(boolean allowNoJobs) { + this.allowNoJobs = allowNoJobs; + } + + @Override + public int hashCode() { + return Objects.hash(jobIds, allowNoJobs); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + GetJobStatsRequest that = (GetJobStatsRequest) other; + return Objects.equals(jobIds, that.jobIds) && + Objects.equals(allowNoJobs, that.allowNoJobs); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), Strings.collectionToCommaDelimitedString(jobIds)); + if (allowNoJobs != null) { + builder.field(ALLOW_NO_JOBS.getPreferredName(), allowNoJobs); + } + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsResponse.java new file mode 100644 index 0000000000000..2e3ba113d193c --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsResponse.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
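// Illustrative usage sketch, not part of this patch: requesting job statistics with the GetJobStatsRequest
// above. The commented-out high-level client entry point is an assumption, not something this diff adds.
GetJobStatsRequest statsRequest = GetJobStatsRequest.getAllJobStatsRequest(); // statistics for every job
statsRequest.setAllowNoJobs(true);
// GetJobStatsResponse statsResponse = client.machineLearning().getJobStats(statsRequest, RequestOptions.DEFAULT);
// List<JobStats> stats = statsResponse.jobStats();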
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.client.ml.job.stats.JobStats; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Contains a {@link List} of the found {@link JobStats} objects and the total count found + */ +public class GetJobStatsResponse extends AbstractResultResponse { + + public static final ParseField RESULTS_FIELD = new ParseField("jobs"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("jobs_stats_response", true, + a -> new GetJobStatsResponse((List) a[0], (long) a[1])); + + static { + PARSER.declareObjectArray(constructorArg(), JobStats.PARSER, RESULTS_FIELD); + PARSER.declareLong(constructorArg(), COUNT); + } + + GetJobStatsResponse(List jobStats, long count) { + super(RESULTS_FIELD, jobStats, count); + } + + /** + * The collection of {@link JobStats} objects found in the query + */ + public List jobStats() { + return results; + } + + public static GetJobStatsResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public int hashCode() { + return Objects.hash(results, count); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + GetJobStatsResponse other = (GetJobStatsResponse) obj; + return Objects.equals(results, other.results) && count == other.count; + } + + @Override + public final String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsRequest.java new file mode 100644 index 0000000000000..0a701f5a1433a --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsRequest.java @@ -0,0 +1,222 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * A request to retrieve records of a given job + */ +public class GetRecordsRequest implements ToXContentObject, Validatable { + + public static final ParseField EXCLUDE_INTERIM = new ParseField("exclude_interim"); + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField RECORD_SCORE = new ParseField("record_score"); + public static final ParseField SORT = new ParseField("sort"); + public static final ParseField DESCENDING = new ParseField("desc"); + + public static final ObjectParser PARSER = new ObjectParser<>("get_buckets_request", GetRecordsRequest::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareBoolean(GetRecordsRequest::setExcludeInterim, EXCLUDE_INTERIM); + PARSER.declareStringOrNull(GetRecordsRequest::setStart, START); + PARSER.declareStringOrNull(GetRecordsRequest::setEnd, END); + PARSER.declareObject(GetRecordsRequest::setPageParams, PageParams.PARSER, PageParams.PAGE); + PARSER.declareDouble(GetRecordsRequest::setRecordScore, RECORD_SCORE); + PARSER.declareString(GetRecordsRequest::setSort, SORT); + PARSER.declareBoolean(GetRecordsRequest::setDescending, DESCENDING); + } + + private String jobId; + private Boolean excludeInterim; + private String start; + private String end; + private PageParams pageParams; + private Double recordScore; + private String sort; + private Boolean descending; + + private GetRecordsRequest() {} + + /** + * Constructs a request to retrieve records of a given job + * @param jobId id of the job to retrieve records of + */ + public GetRecordsRequest(String jobId) { + this.jobId = Objects.requireNonNull(jobId); + } + + public String getJobId() { + return jobId; + } + + public boolean isExcludeInterim() { + return excludeInterim; + } + + /** + * Sets the value of "exclude_interim". + * When {@code true}, interim records will be filtered out. + * @param excludeInterim value of "exclude_interim" to be set + */ + public void setExcludeInterim(boolean excludeInterim) { + this.excludeInterim = excludeInterim; + } + + public String getStart() { + return start; + } + + /** + * Sets the value of "start" which is a timestamp. + * Only records whose timestamp is on or after the "start" value will be returned. + * @param start value of "start" to be set + */ + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + /** + * Sets the value of "end" which is a timestamp. + * Only records whose timestamp is before the "end" value will be returned. 
+ * @param end value of "end" to be set + */ + public void setEnd(String end) { + this.end = end; + } + + public PageParams getPageParams() { + return pageParams; + } + + /** + * Sets the paging parameters + * @param pageParams The paging parameters + */ + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + public Double getRecordScore() { + return recordScore; + } + + /** + * Sets the value of "record_score". + * Only records with "record_score" equal or greater will be returned. + * @param recordScore value of "record_score". + */ + public void setRecordScore(double recordScore) { + this.recordScore = recordScore; + } + + public String getSort() { + return sort; + } + + /** + * Sets the value of "sort". + * Specifies the bucket field to sort on. + * @param sort value of "sort". + */ + public void setSort(String sort) { + this.sort = sort; + } + + public boolean isDescending() { + return descending; + } + + /** + * Sets the value of "desc". + * Specifies the sorting order. + * @param descending value of "desc" + */ + public void setDescending(boolean descending) { + this.descending = descending; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (excludeInterim != null) { + builder.field(EXCLUDE_INTERIM.getPreferredName(), excludeInterim); + } + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (pageParams != null) { + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + } + if (recordScore != null) { + builder.field(RECORD_SCORE.getPreferredName(), recordScore); + } + if (sort != null) { + builder.field(SORT.getPreferredName(), sort); + } + if (descending != null) { + builder.field(DESCENDING.getPreferredName(), descending); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, excludeInterim, recordScore, pageParams, start, end, sort, descending); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetRecordsRequest other = (GetRecordsRequest) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(excludeInterim, other.excludeInterim) && + Objects.equals(recordScore, other.recordScore) && + Objects.equals(pageParams, other.pageParams) && + Objects.equals(start, other.start) && + Objects.equals(end, other.end) && + Objects.equals(sort, other.sort) && + Objects.equals(descending, other.descending); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsResponse.java new file mode 100644 index 0000000000000..99e1152422609 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsResponse.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
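// Illustrative usage sketch, not part of this patch: requesting anomaly records with the GetRecordsRequest above.
GetRecordsRequest recordsRequest = new GetRecordsRequest("my-job");
recordsRequest.setExcludeInterim(true); // filter out interim records
recordsRequest.setRecordScore(80.0);    // only records scoring 80 or higher
recordsRequest.setSort("record_score");
recordsRequest.setDescending(true);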
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.results.AnomalyRecord; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * A response containing the requested buckets + */ +public class GetRecordsResponse extends AbstractResultResponse { + + public static final ParseField RECORDS = new ParseField("records"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("get_records_response", + true, a -> new GetRecordsResponse((List) a[0], (long) a[1])); + + static { + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), AnomalyRecord.PARSER, RECORDS); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), COUNT); + } + + public static GetRecordsResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + GetRecordsResponse(List buckets, long count) { + super(RECORDS, buckets, count); + } + + /** + * The retrieved records + * @return the retrieved records + */ + public List records() { + return results; + } + + @Override + public int hashCode() { + return Objects.hash(count, results); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetRecordsResponse other = (GetRecordsResponse) obj; + return count == other.count && Objects.equals(results, other.results); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/NodeAttributes.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/NodeAttributes.java new file mode 100644 index 0000000000000..892df340abd6b --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/NodeAttributes.java @@ -0,0 +1,150 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +/** + * A Pojo class containing an Elastic Node's attributes + */ +public class NodeAttributes implements ToXContentObject { + + public static final ParseField ID = new ParseField("id"); + public static final ParseField NAME = new ParseField("name"); + public static final ParseField EPHEMERAL_ID = new ParseField("ephemeral_id"); + public static final ParseField TRANSPORT_ADDRESS = new ParseField("transport_address"); + public static final ParseField ATTRIBUTES = new ParseField("attributes"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("node", true, + (a) -> { + int i = 0; + String id = (String) a[i++]; + String name = (String) a[i++]; + String ephemeralId = (String) a[i++]; + String transportAddress = (String) a[i++]; + Map attributes = (Map) a[i]; + return new NodeAttributes(id, name, ephemeralId, transportAddress, attributes); + }); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ID); + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME); + PARSER.declareString(ConstructingObjectParser.constructorArg(), EPHEMERAL_ID); + PARSER.declareString(ConstructingObjectParser.constructorArg(), TRANSPORT_ADDRESS); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p, c) -> p.mapStrings(), + ATTRIBUTES, + ObjectParser.ValueType.OBJECT); + } + + private final String id; + private final String name; + private final String ephemeralId; + private final String transportAddress; + private final Map attributes; + + public NodeAttributes(String id, String name, String ephemeralId, String transportAddress, Map attributes) { + this.id = id; + this.name = name; + this.ephemeralId = ephemeralId; + this.transportAddress = transportAddress; + this.attributes = Collections.unmodifiableMap(attributes); + } + + /** + * The unique identifier of the node. + */ + public String getId() { + return id; + } + + /** + * The node name. + */ + public String getName() { + return name; + } + + /** + * The ephemeral id of the node. + */ + public String getEphemeralId() { + return ephemeralId; + } + + /** + * The host and port where transport HTTP connections are accepted. + */ + public String getTransportAddress() { + return transportAddress; + } + + /** + * Additional attributes related to this node e.g., {"ml.max_open_jobs": "10"}. 
+ */ + public Map getAttributes() { + return attributes; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ID.getPreferredName(), id); + builder.field(NAME.getPreferredName(), name); + builder.field(EPHEMERAL_ID.getPreferredName(), ephemeralId); + builder.field(TRANSPORT_ADDRESS.getPreferredName(), transportAddress); + builder.field(ATTRIBUTES.getPreferredName(), attributes); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id, name, ephemeralId, transportAddress, attributes); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + NodeAttributes that = (NodeAttributes) other; + return Objects.equals(id, that.id) && + Objects.equals(name, that.name) && + Objects.equals(ephemeralId, that.ephemeralId) && + Objects.equals(transportAddress, that.transportAddress) && + Objects.equals(attributes, that.attributes); + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/OpenJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/OpenJobRequest.java similarity index 85% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/OpenJobRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/OpenJobRequest.java index a18a18bb55a14..5b8e68cd72dc3 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/OpenJobRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/OpenJobRequest.java @@ -16,23 +16,25 @@ * specific language governing permissions and limitations * under the License. 
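// For illustration only, not part of this patch: building the NodeAttributes POJO above by hand with
// made-up values (java.util.Map / java.util.HashMap imports assumed). In practice it would normally be
// parsed from X-Content via NodeAttributes.PARSER rather than constructed directly.
Map<String, String> attributes = new HashMap<>();
attributes.put("ml.max_open_jobs", "10"); // example attribute from the javadoc above
NodeAttributes node = new NodeAttributes("node-id", "node-name",
        "ephemeral-id", "127.0.0.1:9300", attributes);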
*/ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.util.Objects; +/** + * Request to open a Machine Learning Job + */ public class OpenJobRequest extends ActionRequest implements ToXContentObject { public static final ParseField TIMEOUT = new ParseField("timeout"); @@ -51,6 +53,11 @@ public static OpenJobRequest fromXContent(XContentParser parser) throws IOExcept private String jobId; private TimeValue timeout; + /** + * Create a new request with the desired jobId + * + * @param jobId unique jobId, must not be null + */ public OpenJobRequest(String jobId) { this.jobId = Objects.requireNonNull(jobId, "[job_id] must not be null"); } @@ -59,6 +66,11 @@ public String getJobId() { return jobId; } + /** + * The jobId to open + * + * @param jobId unique jobId, must not be null + */ public void setJobId(String jobId) { this.jobId = Objects.requireNonNull(jobId, "[job_id] must not be null"); } @@ -67,6 +79,11 @@ public TimeValue getTimeout() { return timeout; } + /** + * How long to wait for job to open before timing out the request + * + * @param timeout default value of 30 minutes + */ public void setTimeout(TimeValue timeout) { this.timeout = timeout; } @@ -77,7 +94,7 @@ public ActionRequestValidationException validate() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(Job.ID.getPreferredName(), jobId); if (timeout != null) { diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/OpenJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/OpenJobResponse.java similarity index 78% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/OpenJobResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/OpenJobResponse.java index d8850ddbbe3a8..2536aeeaf78bb 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/OpenJobResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/OpenJobResponse.java @@ -16,11 +16,11 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -28,22 +28,23 @@ import java.io.IOException; import java.util.Objects; +/** + * Response indicating if the Machine Learning Job is now opened or not + */ public class OpenJobResponse extends ActionResponse implements ToXContentObject { private static final ParseField OPENED = new ParseField("opened"); - public static final ObjectParser PARSER = new ObjectParser<>("open_job_response", true, OpenJobResponse::new); + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("open_job_response", true, (a) -> new OpenJobResponse((Boolean)a[0])); static { - PARSER.declareBoolean(OpenJobResponse::setOpened, OPENED); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), OPENED); } private boolean opened; - OpenJobResponse() { - } - - public OpenJobResponse(boolean opened) { + OpenJobResponse(boolean opened) { this.opened = opened; } @@ -51,14 +52,15 @@ public static OpenJobResponse fromXContent(XContentParser parser) throws IOExcep return PARSER.parse(parser, null); } + /** + * Has the job opened or not + * + * @return boolean value indicating the job opened status + */ public boolean isOpened() { return opened; } - public void setOpened(boolean opened) { - this.opened = opened; - } - @Override public boolean equals(Object other) { if (this == other) { diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutJobRequest.java similarity index 87% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutJobRequest.java index 2cdf1993fccd3..de8529de6bb8a 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutJobRequest.java @@ -16,22 +16,30 @@ * specific language governing permissions and limitations * under the License. 
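Taken together, the relocated OpenJobRequest and the now-immutable OpenJobResponse form a request/response pair for the high-level client. A hedged sketch follows; the machineLearning().openJob(...) entry point on RestHighLevelClient is an assumption for illustration and is not part of this hunk.

```java
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.OpenJobResponse;
import org.elasticsearch.common.unit.TimeValue;

public class OpenJobExample {
    // 'client' is an already-configured RestHighLevelClient; the openJob(...) method on a
    // machineLearning() namespace is assumed here for illustration only.
    static boolean openJob(RestHighLevelClient client) throws Exception {
        OpenJobRequest request = new OpenJobRequest("my-job");   // job_id must not be null
        request.setTimeout(TimeValue.timeValueMinutes(10));      // optional, defaults to 30 minutes
        OpenJobResponse response = client.machineLearning().openJob(request, RequestOptions.DEFAULT);
        return response.isOpened();
    }
}
```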
*/ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.util.Objects; +/** + * Request to create a new Machine Learning Job given a {@link Job} configuration + */ public class PutJobRequest extends ActionRequest implements ToXContentObject { private final Job job; + /** + * Construct a new PutJobRequest + * + * @param job a {@link Job} configuration to create + */ public PutJobRequest(Job job) { this.job = job; } diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutJobResponse.java similarity index 84% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutJobResponse.java index b37bd35d6b17f..6e6cce52e58c2 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutJobResponse.java @@ -16,17 +16,19 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.util.Objects; +/** + * Response containing the newly created {@link Job} + */ public class PutJobResponse implements ToXContentObject { private Job job; @@ -35,19 +37,16 @@ public static PutJobResponse fromXContent(XContentParser parser) throws IOExcept return new PutJobResponse(Job.PARSER.parse(parser, null).build()); } - public PutJobResponse(Job job) { + PutJobResponse(Job job) { this.job = job; } - public PutJobResponse() { - } - public Job getResponse() { return job; } @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { job.toXContent(builder, params); return builder; } diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/ChunkingConfig.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/ChunkingConfig.java index 0b9d9f1204614..10e7b3f974941 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/ChunkingConfig.java @@ -16,7 +16,7 @@ * specific language governing permissions and 
limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.datafeed; +package org.elasticsearch.client.ml.datafeed; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java index 929d4dacb90fa..752752b103885 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.datafeed; +package org.elasticsearch.client.ml.datafeed; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -27,7 +28,6 @@ import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.builder.SearchSourceBuilder; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdate.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdate.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java index 787bdf06e5ec2..184d5d51481fa 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdate.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.datafeed; +package org.elasticsearch.client.ml.datafeed; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -26,7 +27,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.builder.SearchSourceBuilder; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java index 00fa1bdd47fed..9b759599dda3c 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.unit.TimeValue; @@ -300,6 +300,10 @@ public int hashCode() { multivariateByFields); } + public static Builder builder(List detectors) { + return new Builder(detectors); + } + public static class Builder { private List detectors; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisLimits.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisLimits.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisLimits.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisLimits.java index f69b9ccbf9ff4..22d26f06fd8ce 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisLimits.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisLimits.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/CategorizationAnalyzerConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/CategorizationAnalyzerConfig.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/CategorizationAnalyzerConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/CategorizationAnalyzerConfig.java index dc7f047b80404..3a2243d6548fd 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/CategorizationAnalyzerConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/CategorizationAnalyzerConfig.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DataDescription.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DataDescription.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DataDescription.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DataDescription.java index a3f8c2563b2d8..636b8c6ad5014 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DataDescription.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DataDescription.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DefaultDetectorDescription.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DefaultDetectorDescription.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DefaultDetectorDescription.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DefaultDetectorDescription.java index 081e685fc741b..25b4fbbb2a7ee 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DefaultDetectorDescription.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DefaultDetectorDescription.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.Strings; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRule.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectionRule.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRule.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectionRule.java index 9a73afe885b1c..bcba8a7d74a61 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRule.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectionRule.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Detector.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Detector.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Detector.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Detector.java index 3274b03877f14..e1af60269b52b 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Detector.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Detector.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; @@ -265,6 +265,10 @@ public int hashCode() { excludeFrequent, rules, detectorIndex); } + public static Builder builder() { + return new Builder(); + } + public static class Builder { private String detectorDescription; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorFunction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectorFunction.java similarity index 97% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorFunction.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectorFunction.java index 5d9a06948d0fb..932782101ba7c 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorFunction.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectorFunction.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import java.util.Arrays; import java.util.Collections; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRef.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/FilterRef.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRef.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/FilterRef.java index 9afbdf4876fd8..b686ad92ae533 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRef.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/FilterRef.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Job.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Job.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java index 6bc1be3b56384..aff74271f1c0b 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Job.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; +import org.elasticsearch.client.ml.job.util.TimeUtil; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; @@ -25,7 +26,6 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.util.TimeUtil; import java.io.IOException; import java.util.Collections; @@ -412,6 +412,10 @@ public final String toString() { return Strings.toString(this); } + public static Builder builder(String id) { + return new Builder(id); + } + public static class Builder { private String id; @@ -435,7 +439,7 @@ public static class Builder { private String resultsIndexName; private boolean deleted; - public Builder() { + private Builder() { } public Builder(String id) { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobState.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobState.java new file mode 100644 index 0000000000000..32684bd7e62b4 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobState.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
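The new static builder() factory methods on Detector, AnalysisConfig and Job above become the entry points for client code now that Job.Builder's no-arg constructor is private. Below is a sketch of assembling a minimal job configuration and wrapping it in a PutJobRequest; the individual setter names are assumed to mirror the server-side builders and do not appear in this excerpt.

```java
import java.util.Collections;

import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.job.config.AnalysisConfig;
import org.elasticsearch.client.ml.job.config.Detector;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.common.unit.TimeValue;

public class PutJobExample {
    static PutJobRequest buildRequest() {
        // A single "count" detector; setFunction is an assumed setter on Detector.Builder.
        Detector detector = Detector.builder().setFunction("count").build();
        // setBucketSpan is an assumed setter on AnalysisConfig.Builder.
        AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList(detector))
                .setBucketSpan(TimeValue.timeValueMinutes(10));
        // setAnalysisConfig is an assumed setter on Job.Builder.
        Job job = Job.builder("my-first-job")
                .setAnalysisConfig(analysisConfig)
                .build();
        return new PutJobRequest(job);
    }
}
```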
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.job.config; + +import java.util.Locale; + +/** + * Jobs whether running or complete are in one of these states. + * When a job is created it is initialised in the state closed + * i.e. it is not running. + */ +public enum JobState { + + CLOSING, CLOSED, OPENED, FAILED, OPENING; + + public static JobState fromString(String name) { + return valueOf(name.trim().toUpperCase(Locale.ROOT)); + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilter.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilter.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java index bcbc0c295c2d5..e0d1bd0849b3b 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilter.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/ModelPlotConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/ModelPlotConfig.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/ModelPlotConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/ModelPlotConfig.java index 59b0252a7660e..b39db054b308b 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/ModelPlotConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/ModelPlotConfig.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
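Since JobState.fromString upper-cases its trimmed input and value() lower-cases the enum name, the new enum round-trips cleanly with the lower-case state names used in REST responses:

```java
import org.elasticsearch.client.ml.job.config.JobState;

public class JobStateExample {
    public static void main(String[] args) {
        // REST responses carry lower-case names such as "opened" and "closed".
        JobState state = JobState.fromString("opened");
        assert state == JobState.OPENED;
        assert "closed".equals(JobState.CLOSED.value());
    }
}
```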
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Operator.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Operator.java similarity index 97% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Operator.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Operator.java index c3dc52e5a3cb9..37d6275203560 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Operator.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Operator.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleAction.java similarity index 95% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleAction.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleAction.java index 9e2364b4fd960..05b6ef6e19754 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleAction.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleAction.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import java.util.Locale; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleCondition.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleCondition.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleCondition.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleCondition.java index ec19547fe13be..14389809bd2fa 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleCondition.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleCondition.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScope.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleScope.java similarity index 89% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScope.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleScope.java index aa12d5ea2a2bd..8b6886d582524 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScope.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/RuleScope.java @@ -16,11 +16,11 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ContextParser; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -50,7 +50,7 @@ public static ContextParser parser() { Map value = (Map) entry.getValue(); builder.map(value); try (XContentParser scopeParser = XContentFactory.xContent(builder.contentType()).createParser( - NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, Strings.toString(builder))) { + NamedXContentRegistry.EMPTY, DEPRECATION_HANDLER, Strings.toString(builder))) { scope.put(entry.getKey(), FilterRef.PARSER.parse(scopeParser, null)); } } @@ -59,6 +59,15 @@ public static ContextParser parser() { }; } + private static final DeprecationHandler DEPRECATION_HANDLER = new DeprecationHandler() { + + @Override + public void usedDeprecatedName(String usedName, String modernName) {} + + @Override + public void usedDeprecatedField(String usedName, String replacedWith) {} + }; + private final Map scope; public RuleScope() { diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/DataCounts.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/DataCounts.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java index e07312d12e1f4..7afef0785fe38 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/DataCounts.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java @@ -16,15 +16,15 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.TimeUtil; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; -import org.elasticsearch.protocol.xpack.ml.job.util.TimeUtil; import java.io.IOException; import java.util.Date; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSizeStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java similarity index 97% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSizeStats.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java index 50f655b4dd7f1..c9a34fe5c98d9 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSizeStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java @@ -16,16 +16,16 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.results.Result; +import org.elasticsearch.client.ml.job.util.TimeUtil; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; -import org.elasticsearch.protocol.xpack.ml.job.results.Result; -import org.elasticsearch.protocol.xpack.ml.job.util.TimeUtil; import java.io.IOException; import java.util.Date; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshot.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java similarity index 97% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshot.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java index 2b9957f9bc756..603bff0d90653 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshot.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java @@ -16,16 +16,16 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; import org.elasticsearch.Version; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.TimeUtil; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; -import org.elasticsearch.protocol.xpack.ml.job.util.TimeUtil; import java.io.IOException; import java.util.Date; @@ -221,10 +221,8 @@ public boolean equals(Object other) { public static class Builder { private String jobId; - // Stored snapshot documents created prior to 6.3.0 will have no - // value for min_version. We default it to 5.5.0 as there were - // no model changes between 5.5.0 and 6.3.0. - private Version minVersion = Version.V_5_5_0; + // Stored snapshot documents created prior to 6.3.0 will have no value for min_version. + private Version minVersion = Version.V_6_3_0; private Date timestamp; private String description; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/Quantiles.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/Quantiles.java similarity index 96% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/Quantiles.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/Quantiles.java index 1c047d6c30284..795028847a0be 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/Quantiles.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/Quantiles.java @@ -16,14 +16,14 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.util.Date; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyCause.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyCause.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyCause.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyCause.java index 7ad57b24fcbdc..4fbe5ac1ff381 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyCause.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyCause.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyRecord.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyRecord.java similarity index 99% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyRecord.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyRecord.java index 4747f3a48bdc8..db4483fef4bfd 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyRecord.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/AnomalyRecord.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -25,7 +26,6 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser.Token; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.time.format.DateTimeFormatter; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Bucket.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Bucket.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Bucket.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Bucket.java index cbaf83abbad40..2dfed4c383403 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Bucket.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Bucket.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -25,7 +26,6 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser.Token; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.time.format.DateTimeFormatter; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketInfluencer.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/BucketInfluencer.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketInfluencer.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/BucketInfluencer.java index 29d8447cd6a37..6fc2a9b8b2d54 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketInfluencer.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/BucketInfluencer.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -25,7 +26,6 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser.Token; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.time.format.DateTimeFormatter; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/CategoryDefinition.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/CategoryDefinition.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/CategoryDefinition.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/CategoryDefinition.java index 59b59006b33a1..dd65899e67e12 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/CategoryDefinition.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/CategoryDefinition.java @@ -16,13 +16,13 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.util.ArrayList; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Influence.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influence.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Influence.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influence.java index 53607479d66f4..bfcc545362d3a 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Influence.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influence.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Influencer.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influencer.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Influencer.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influencer.java index 51c88883608b0..28ceb243bf6b2 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Influencer.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Influencer.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -25,7 +26,6 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser.Token; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.time.format.DateTimeFormatter; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/OverallBucket.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/OverallBucket.java similarity index 98% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/OverallBucket.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/OverallBucket.java index 4f13b4b26646e..eaf050f8be9fb 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/OverallBucket.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/OverallBucket.java @@ -16,8 +16,9 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; +import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -25,7 +26,6 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; import java.io.IOException; import java.time.format.DateTimeFormatter; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Result.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Result.java similarity index 95% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Result.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Result.java index cce5fa65ebb44..a7f8933a0a131 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/results/Result.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/results/Result.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.ParseField; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/ForecastStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/ForecastStats.java new file mode 100644 index 0000000000000..a6b41beca8366 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/ForecastStats.java @@ -0,0 +1,174 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.job.stats; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * A class to hold statistics about forecasts. + */ +public class ForecastStats implements ToXContentObject { + + public static final ParseField TOTAL = new ParseField("total"); + public static final ParseField FORECASTED_JOBS = new ParseField("forecasted_jobs"); + public static final ParseField MEMORY_BYTES = new ParseField("memory_bytes"); + public static final ParseField PROCESSING_TIME_MS = new ParseField("processing_time_ms"); + public static final ParseField RECORDS = new ParseField("records"); + public static final ParseField STATUS = new ParseField("status"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("forecast_stats", + true, + (a) -> { + int i = 0; + long total = (long)a[i++]; + SimpleStats memoryStats = (SimpleStats)a[i++]; + SimpleStats recordStats = (SimpleStats)a[i++]; + SimpleStats runtimeStats = (SimpleStats)a[i++]; + Map statusCounts = (Map)a[i]; + return new ForecastStats(total, memoryStats, recordStats, runtimeStats, statusCounts); + }); + + static { + PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SimpleStats.PARSER, MEMORY_BYTES); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SimpleStats.PARSER, RECORDS); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SimpleStats.PARSER, PROCESSING_TIME_MS); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + p -> { + Map counts = new HashMap<>(); + p.map().forEach((key, value) -> counts.put(key, ((Number)value).longValue())); + return counts; + }, STATUS, ObjectParser.ValueType.OBJECT); + } + + private final long total; + private final long forecastedJobs; + private SimpleStats memoryStats; + private SimpleStats recordStats; + private SimpleStats runtimeStats; + private Map statusCounts; + + public ForecastStats(long total, + SimpleStats memoryStats, + SimpleStats recordStats, + SimpleStats runtimeStats, + Map statusCounts) { + this.total = total; + this.forecastedJobs = total > 0 ? 
1 : 0; + if (total > 0) { + this.memoryStats = Objects.requireNonNull(memoryStats); + this.recordStats = Objects.requireNonNull(recordStats); + this.runtimeStats = Objects.requireNonNull(runtimeStats); + this.statusCounts = Collections.unmodifiableMap(statusCounts); + } + } + + /** + * The number of forecasts currently available for this model. + */ + public long getTotal() { + return total; + } + + /** + * The number of jobs that have at least one forecast. + */ + public long getForecastedJobs() { + return forecastedJobs; + } + + /** + * Statistics about the memory usage: minimum, maximum, average and total. + */ + public SimpleStats getMemoryStats() { + return memoryStats; + } + + /** + * Statistics about the number of forecast records: minimum, maximum, average and total. + */ + public SimpleStats getRecordStats() { + return recordStats; + } + + /** + * Statistics about the forecast runtime in milliseconds: minimum, maximum, average and total + */ + public SimpleStats getRuntimeStats() { + return runtimeStats; + } + + /** + * Counts per forecast status, for example: {"finished" : 2}. + */ + public Map getStatusCounts() { + return statusCounts; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(TOTAL.getPreferredName(), total); + builder.field(FORECASTED_JOBS.getPreferredName(), forecastedJobs); + + if (total > 0) { + builder.field(MEMORY_BYTES.getPreferredName(), memoryStats); + builder.field(RECORDS.getPreferredName(), recordStats); + builder.field(PROCESSING_TIME_MS.getPreferredName(), runtimeStats); + builder.field(STATUS.getPreferredName(), statusCounts); + } + return builder.endObject(); + } + + @Override + public int hashCode() { + return Objects.hash(total, forecastedJobs, memoryStats, recordStats, runtimeStats, statusCounts); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ForecastStats other = (ForecastStats) obj; + return Objects.equals(total, other.total) && + Objects.equals(forecastedJobs, other.forecastedJobs) && + Objects.equals(memoryStats, other.memoryStats) && + Objects.equals(recordStats, other.recordStats) && + Objects.equals(runtimeStats, other.runtimeStats) && + Objects.equals(statusCounts, other.statusCounts); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/JobStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/JobStats.java new file mode 100644 index 0000000000000..df5be4aa4c5cc --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/JobStats.java @@ -0,0 +1,225 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
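Because the ForecastStats parser is lenient and only "total" is a required constructor argument, a job that has never produced a forecast parses from a very small document. A minimal sketch, assuming PARSER is declared as ConstructingObjectParser<ForecastStats, Void> and using the standard XContent parsing entry points:

```java
import org.elasticsearch.client.ml.job.stats.ForecastStats;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

public class ForecastStatsExample {
    public static void main(String[] args) throws Exception {
        // No forecasts yet: only "total" is present, so the optional sub-stats stay null.
        String json = "{\"total\":0}";
        try (XContentParser parser = XContentType.JSON.xContent().createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            ForecastStats stats = ForecastStats.PARSER.parse(parser, null);
            assert stats.getTotal() == 0;
            assert stats.getForecastedJobs() == 0;
        }
    }
}
```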
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.job.stats; + +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.config.JobState; +import org.elasticsearch.client.ml.job.process.DataCounts; +import org.elasticsearch.client.ml.job.process.ModelSizeStats; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.client.ml.NodeAttributes; + +import java.io.IOException; +import java.util.Objects; + +/** + * Class containing the statistics for a Machine Learning job. + * + */ +public class JobStats implements ToXContentObject { + + private static final ParseField DATA_COUNTS = new ParseField("data_counts"); + private static final ParseField MODEL_SIZE_STATS = new ParseField("model_size_stats"); + private static final ParseField FORECASTS_STATS = new ParseField("forecasts_stats"); + private static final ParseField STATE = new ParseField("state"); + private static final ParseField NODE = new ParseField("node"); + private static final ParseField OPEN_TIME = new ParseField("open_time"); + private static final ParseField ASSIGNMENT_EXPLANATION = new ParseField("assignment_explanation"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("job_stats", + true, + (a) -> { + int i = 0; + String jobId = (String) a[i++]; + DataCounts dataCounts = (DataCounts) a[i++]; + JobState jobState = (JobState) a[i++]; + ModelSizeStats.Builder modelSizeStatsBuilder = (ModelSizeStats.Builder) a[i++]; + ModelSizeStats modelSizeStats = modelSizeStatsBuilder == null ? 
null : modelSizeStatsBuilder.build(); + ForecastStats forecastStats = (ForecastStats) a[i++]; + NodeAttributes node = (NodeAttributes) a[i++]; + String assignmentExplanation = (String) a[i++]; + TimeValue openTime = (TimeValue) a[i]; + return new JobStats(jobId, + dataCounts, + jobState, + modelSizeStats, + forecastStats, + node, + assignmentExplanation, + openTime); + }); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), DataCounts.PARSER, DATA_COUNTS); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p) -> JobState.fromString(p.text()), + STATE, + ObjectParser.ValueType.VALUE); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ModelSizeStats.PARSER, MODEL_SIZE_STATS); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ForecastStats.PARSER, FORECASTS_STATS); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), NodeAttributes.PARSER, NODE); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), ASSIGNMENT_EXPLANATION); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.textOrNull(), OPEN_TIME.getPreferredName()), + OPEN_TIME, + ObjectParser.ValueType.STRING_OR_NULL); + } + + + private final String jobId; + private final DataCounts dataCounts; + private final JobState state; + private final ModelSizeStats modelSizeStats; + private final ForecastStats forecastStats; + private final NodeAttributes node; + private final String assignmentExplanation; + private final TimeValue openTime; + + JobStats(String jobId, DataCounts dataCounts, JobState state, @Nullable ModelSizeStats modelSizeStats, + @Nullable ForecastStats forecastStats, @Nullable NodeAttributes node, + @Nullable String assignmentExplanation, @Nullable TimeValue opentime) { + this.jobId = Objects.requireNonNull(jobId); + this.dataCounts = Objects.requireNonNull(dataCounts); + this.state = Objects.requireNonNull(state); + this.modelSizeStats = modelSizeStats; + this.forecastStats = forecastStats; + this.node = node; + this.assignmentExplanation = assignmentExplanation; + this.openTime = opentime; + } + + /** + * The jobId referencing the job for these statistics + */ + public String getJobId() { + return jobId; + } + + /** + * An object that describes the number of records processed and any related error counts + * See {@link DataCounts} + */ + public DataCounts getDataCounts() { + return dataCounts; + } + + /** + * An object that provides information about the size and contents of the model. + * See {@link ModelSizeStats} + */ + public ModelSizeStats getModelSizeStats() { + return modelSizeStats; + } + + /** + * An object that provides statistical information about forecasts of this job. + * See {@link ForecastStats} + */ + public ForecastStats getForecastStats() { + return forecastStats; + } + + /** + * The status of the job + * See {@link JobState} + */ + public JobState getState() { + return state; + } + + /** + * For open jobs only, contains information about the node where the job runs + * See {@link NodeAttributes} + */ + public NodeAttributes getNode() { + return node; + } + + /** + * For open jobs only, contains messages relating to the selection of a node to run the job. 
+ */ + public String getAssignmentExplanation() { + return assignmentExplanation; + } + + /** + * For open jobs only, the elapsed time for which the job has been open + */ + public TimeValue getOpenTime() { + return openTime; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(DATA_COUNTS.getPreferredName(), dataCounts); + builder.field(STATE.getPreferredName(), state.toString()); + if (modelSizeStats != null) { + builder.field(MODEL_SIZE_STATS.getPreferredName(), modelSizeStats); + } + if (forecastStats != null) { + builder.field(FORECASTS_STATS.getPreferredName(), forecastStats); + } + if (node != null) { + builder.field(NODE.getPreferredName(), node); + } + if (assignmentExplanation != null) { + builder.field(ASSIGNMENT_EXPLANATION.getPreferredName(), assignmentExplanation); + } + if (openTime != null) { + builder.field(OPEN_TIME.getPreferredName(), openTime.getStringRep()); + } + return builder.endObject(); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, dataCounts, modelSizeStats, forecastStats, state, node, assignmentExplanation, openTime); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + JobStats other = (JobStats) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(this.dataCounts, other.dataCounts) && + Objects.equals(this.modelSizeStats, other.modelSizeStats) && + Objects.equals(this.forecastStats, other.forecastStats) && + Objects.equals(this.state, other.state) && + Objects.equals(this.node, other.node) && + Objects.equals(this.assignmentExplanation, other.assignmentExplanation) && + Objects.equals(this.openTime, other.openTime); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/SimpleStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/SimpleStats.java new file mode 100644 index 0000000000000..f4c8aa0fa3b29 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/SimpleStats.java @@ -0,0 +1,117 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml.job.stats; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Helper class for min, max, avg and total statistics for a quantity + */ +public class SimpleStats implements ToXContentObject { + + public static final ParseField MIN = new ParseField("min"); + public static final ParseField MAX = new ParseField("max"); + public static final ParseField AVG = new ParseField("avg"); + public static final ParseField TOTAL = new ParseField("total"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("simple_stats", true, + (a) -> { + int i = 0; + double total = (double)a[i++]; + double min = (double)a[i++]; + double max = (double)a[i++]; + double avg = (double)a[i++]; + return new SimpleStats(total, min, max, avg); + }); + + static { + PARSER.declareDouble(ConstructingObjectParser.constructorArg(), TOTAL); + PARSER.declareDouble(ConstructingObjectParser.constructorArg(), MIN); + PARSER.declareDouble(ConstructingObjectParser.constructorArg(), MAX); + PARSER.declareDouble(ConstructingObjectParser.constructorArg(), AVG); + } + + private final double total; + private final double min; + private final double max; + private final double avg; + + SimpleStats(double total, double min, double max, double avg) { + this.total = total; + this.min = min; + this.max = max; + this.avg = avg; + } + + public double getMin() { + return min; + } + + public double getMax() { + return max; + } + + public double getAvg() { + return avg; + } + + public double getTotal() { + return total; + } + + @Override + public int hashCode() { + return Objects.hash(total, min, max, avg); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + SimpleStats other = (SimpleStats) obj; + return Objects.equals(total, other.total) && + Objects.equals(min, other.min) && + Objects.equals(avg, other.avg) && + Objects.equals(max, other.max); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MIN.getPreferredName(), min); + builder.field(MAX.getPreferredName(), max); + builder.field(AVG.getPreferredName(), avg); + builder.field(TOTAL.getPreferredName(), total); + builder.endObject(); + return builder; + } +} + diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/PageParams.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/PageParams.java new file mode 100644 index 0000000000000..52d54188f7007 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/PageParams.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.job.util; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Paging parameters for GET requests + */ +public class PageParams implements ToXContentObject { + + public static final ParseField PAGE = new ParseField("page"); + public static final ParseField FROM = new ParseField("from"); + public static final ParseField SIZE = new ParseField("size"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(PAGE.getPreferredName(), + a -> new PageParams((Integer) a[0], (Integer) a[1])); + + static { + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), FROM); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), SIZE); + } + + private final Integer from; + private final Integer size; + + /** + * Constructs paging parameters + * @param from skips the specified number of items. When {@code null} the default value will be used. + * @param size specifies the maximum number of items to obtain. When {@code null} the default value will be used. + */ + public PageParams(@Nullable Integer from, @Nullable Integer size) { + this.from = from; + this.size = size; + } + + public int getFrom() { + return from; + } + + public int getSize() { + return size; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (from != null) { + builder.field(FROM.getPreferredName(), from); + } + if (size != null) { + builder.field(SIZE.getPreferredName(), size); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(from, size); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + PageParams other = (PageParams) obj; + return Objects.equals(from, other.from) && + Objects.equals(size, other.size); + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/util/TimeUtil.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/TimeUtil.java similarity index 97% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/util/TimeUtil.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/TimeUtil.java index 549b196949145..4c21ffb2175b2 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/util/TimeUtil.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/TimeUtil.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
 */
-package org.elasticsearch.protocol.xpack.ml.job.util;
+package org.elasticsearch.client.ml.job.util;
 
 import org.elasticsearch.common.time.DateFormatters;
 import org.elasticsearch.common.xcontent.XContentParser;
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/RefreshPolicy.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/RefreshPolicy.java
new file mode 100644
index 0000000000000..8b72f704edff4
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/RefreshPolicy.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.security;
+
+/**
+ * Enumeration of values that control the refresh policy for a request that
+ * supports specifying a refresh policy.
+ */
+public enum RefreshPolicy {
+
+    /**
+     * Don't refresh after this request. The default.
+     */
+    NONE("false"),
+    /**
+     * Force a refresh as part of this request. This refresh policy does not scale for high indexing or search throughput but is useful
+     * to present a consistent view for indices with very low traffic. And it is wonderful for tests!
+     */
+    IMMEDIATE("true"),
+    /**
+     * Leave this request open until a refresh has made the contents of this request visible to search. This refresh policy is
+     * compatible with high indexing and search throughput but it causes the request to wait to reply until a refresh occurs.
+ */ + WAIT_UNTIL("wait_for"); + + private final String value; + + RefreshPolicy(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + /** + * Get the default refresh policy, which is NONE + */ + public static RefreshPolicy getDefault() { + return RefreshPolicy.NONE; + } +} diff --git a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt index 33e136a66f44f..cc179e12e3163 100644 --- a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt +++ b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt @@ -20,5 +20,14 @@ org.apache.http.entity.ContentType#create(java.lang.String,java.lang.String) org.apache.http.entity.ContentType#create(java.lang.String,java.nio.charset.Charset) org.apache.http.entity.ContentType#create(java.lang.String,org.apache.http.NameValuePair[]) +@defaultMessage ES's logging infrastructure uses log4j2 which we don't want to force on high level rest client users +org.elasticsearch.common.logging.DeprecationLogger +org.elasticsearch.common.logging.ESLoggerFactory +org.elasticsearch.common.logging.LogConfigurator +org.elasticsearch.common.logging.LoggerMessageFormat +org.elasticsearch.common.logging.Loggers +org.elasticsearch.common.logging.NodeNamePatternConverter +org.elasticsearch.common.logging.PrefixLogger + @defaultMessage We can't rely on log4j2 being on the classpath so don't log deprecations! org.elasticsearch.common.xcontent.LoggingDeprecationHandler diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index 58b4b268788b5..a914008376a5d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.client; +import org.apache.http.util.EntityUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; @@ -34,6 +35,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.client.Request; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.rest.RestStatus; @@ -174,6 +176,8 @@ public void testClusterHealthYellowClusterLevel() throws IOException { request.timeout("5s"); ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync); + logger.info("Shard stats\n{}", EntityUtils.toString( + client().performRequest(new Request("GET", "/_cat/shards")).getEntity())); assertYellowShards(response); assertThat(response.getIndices().size(), equalTo(0)); } @@ -186,6 +190,8 @@ public void testClusterHealthYellowIndicesLevel() throws IOException { request.level(ClusterHealthRequest.Level.INDICES); ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync); + logger.info("Shard stats\n{}", EntityUtils.toString( + client().performRequest(new Request("GET", "/_cat/shards")).getEntity())); assertYellowShards(response); 
assertThat(response.getIndices().size(), equalTo(2)); for (Map.Entry entry : response.getIndices().entrySet()) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index 89f357477fa06..e02d9f451ebe0 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -41,12 +41,17 @@ import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.query.IdsQueryBuilder; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -624,6 +629,135 @@ public void testBulk() throws IOException { validateBulkResponses(nbItems, errors, bulkResponse, bulkRequest); } + public void testReindex() throws IOException { + final String sourceIndex = "source1"; + final String destinationIndex = "dest"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + createIndex(destinationIndex, settings); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(sourceIndex, "type", "1") + .source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) + .add(new IndexRequest(sourceIndex, "type", "2") + .source(Collections.singletonMap("foo2", "bar2"), XContentType.JSON)) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + } + { + // test1: create one doc in dest + ReindexRequest reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices(sourceIndex); + reindexRequest.setDestIndex(destinationIndex); + reindexRequest.setSourceQuery(new IdsQueryBuilder().addIds("1").types("type")); + reindexRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = execute(reindexRequest, highLevelClient()::reindex, highLevelClient()::reindexAsync); + assertEquals(1, bulkResponse.getCreated()); + assertEquals(1, bulkResponse.getTotal()); + assertEquals(0, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + } + { + // test2: create 1 and update 1 + ReindexRequest reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices(sourceIndex); + reindexRequest.setDestIndex(destinationIndex); + BulkByScrollResponse bulkResponse = execute(reindexRequest, highLevelClient()::reindex, highLevelClient()::reindexAsync); + assertEquals(1, 
bulkResponse.getCreated()); + assertEquals(2, bulkResponse.getTotal()); + assertEquals(1, bulkResponse.getUpdated()); + assertEquals(0, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + } + } + + public void testUpdateByQuery() throws IOException { + final String sourceIndex = "source1"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(sourceIndex, "type", "1") + .source(Collections.singletonMap("foo", 1), XContentType.JSON)) + .add(new IndexRequest(sourceIndex, "type", "2") + .source(Collections.singletonMap("foo", 2), XContentType.JSON)) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + } + { + // test1: create one doc in dest + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(sourceIndex); + updateByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1").types("type")); + updateByQueryRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = + execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); + assertEquals(1, bulkResponse.getTotal()); + assertEquals(1, bulkResponse.getUpdated()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + } + { + // test2: update using script + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(sourceIndex); + updateByQueryRequest.setScript(new Script("if (ctx._source.foo == 2) ctx._source.foo++;")); + updateByQueryRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = + execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); + assertEquals(2, bulkResponse.getTotal()); + assertEquals(2, bulkResponse.getUpdated()); + assertEquals(0, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + assertEquals( + 3, + (int) (highLevelClient().get(new GetRequest(sourceIndex, "type", "2"), RequestOptions.DEFAULT) + .getSourceAsMap().get("foo")) + ); + } + } + public void testBulkProcessorIntegration() throws IOException { int nbItems = randomIntBetween(10, 100); boolean[] errors = new boolean[nbItems]; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/GraphIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/GraphIT.java new file mode 100644 index 0000000000000..4376b47d737b4 
--- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/GraphIT.java @@ -0,0 +1,139 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.protocol.xpack.graph.Vertex; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.hamcrest.Matchers; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +public class GraphIT extends ESRestHighLevelClientTestCase { + + @Before + public void indexDocuments() throws IOException { + // Create chain of doc IDs across indices 1->2->3 + Request doc1 = new Request(HttpPut.METHOD_NAME, "/index1/type/1"); + doc1.setJsonEntity("{ \"num\":[1], \"const\":\"start\"}"); + client().performRequest(doc1); + + Request doc2 = new Request(HttpPut.METHOD_NAME, "/index2/type/1"); + doc2.setJsonEntity("{\"num\":[1,2], \"const\":\"foo\"}"); + client().performRequest(doc2); + + Request doc3 = new Request(HttpPut.METHOD_NAME, "/index2/type/2"); + doc3.setJsonEntity("{\"num\":[2,3], \"const\":\"foo\"}"); + client().performRequest(doc3); + + Request doc4 = new Request(HttpPut.METHOD_NAME, "/index_no_field_data/type/2"); + doc4.setJsonEntity("{\"num\":\"string\", \"const\":\"foo\"}"); + client().performRequest(doc4); + + Request doc5 = new Request(HttpPut.METHOD_NAME, "/index_no_field_data/type/2"); + doc5.setJsonEntity("{\"num\":[2,4], \"const\":\"foo\"}"); + client().performRequest(doc5); + + + client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh")); + } + + public void testCleanExplore() throws Exception { + GraphExploreRequest graphExploreRequest = new GraphExploreRequest(); + graphExploreRequest.indices("index1", "index2"); + graphExploreRequest.useSignificance(false); + int numHops = 3; + for (int i = 0; i < numHops; i++) { + QueryBuilder guidingQuery = null; + if (i == 0) { + guidingQuery = new TermQueryBuilder("const.keyword", "start"); + } else if (randomBoolean()){ + guidingQuery = new TermQueryBuilder("const.keyword", "foo"); + } + Hop hop = graphExploreRequest.createNextHop(guidingQuery); + VertexRequest vr = hop.addVertexRequest("num"); + vr.minDocCount(1); + } + Map expectedTermsAndDepths = new HashMap<>(); + expectedTermsAndDepths.put("1", 0); + 
expectedTermsAndDepths.put("2", 1); + expectedTermsAndDepths.put("3", 2); + + GraphExploreResponse exploreResponse = highLevelClient().graph().explore(graphExploreRequest, RequestOptions.DEFAULT); + Map actualTermsAndDepths = new HashMap<>(); + Collection v = exploreResponse.getVertices(); + for (Vertex vertex : v) { + actualTermsAndDepths.put(vertex.getTerm(), vertex.getHopDepth()); + } + assertEquals(expectedTermsAndDepths, actualTermsAndDepths); + assertThat(exploreResponse.isTimedOut(), Matchers.is(false)); + ShardOperationFailedException[] failures = exploreResponse.getShardFailures(); + assertThat(failures.length, Matchers.equalTo(0)); + + } + + public void testBadExplore() throws Exception { + //Explore indices where lack of fielddata=true on one index leads to partial failures + GraphExploreRequest graphExploreRequest = new GraphExploreRequest(); + graphExploreRequest.indices("index1", "index2", "index_no_field_data"); + graphExploreRequest.useSignificance(false); + int numHops = 3; + for (int i = 0; i < numHops; i++) { + QueryBuilder guidingQuery = null; + if (i == 0) { + guidingQuery = new TermQueryBuilder("const.keyword", "start"); + } else if (randomBoolean()){ + guidingQuery = new TermQueryBuilder("const.keyword", "foo"); + } + Hop hop = graphExploreRequest.createNextHop(guidingQuery); + VertexRequest vr = hop.addVertexRequest("num"); + vr.minDocCount(1); + } + Map expectedTermsAndDepths = new HashMap<>(); + expectedTermsAndDepths.put("1", 0); + expectedTermsAndDepths.put("2", 1); + expectedTermsAndDepths.put("3", 2); + + GraphExploreResponse exploreResponse = highLevelClient().graph().explore(graphExploreRequest, RequestOptions.DEFAULT); + Map actualTermsAndDepths = new HashMap<>(); + Collection v = exploreResponse.getVertices(); + for (Vertex vertex : v) { + actualTermsAndDepths.put(vertex.getTerm(), vertex.getHopDepth()); + } + assertEquals(expectedTermsAndDepths, actualTermsAndDepths); + assertThat(exploreResponse.isTimedOut(), Matchers.is(false)); + ShardOperationFailedException[] failures = exploreResponse.getShardFailures(); + assertThat(failures.length, Matchers.equalTo(1)); + assertTrue(failures[0].reason().contains("Fielddata is disabled")); + + } + + +} \ No newline at end of file diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java new file mode 100644 index 0000000000000..d84099d9a3c40 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -0,0 +1,195 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.DeleteJobRequest; +import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetJobRequest; +import org.elasticsearch.client.ml.OpenJobRequest; +import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.job.config.AnalysisConfig; +import org.elasticsearch.client.ml.job.config.Detector; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.client.ml.FlushJobRequest; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.test.ESTestCase; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; + +public class MLRequestConvertersTests extends ESTestCase { + + public void testPutJob() throws IOException { + Job job = createValidJob("foo"); + PutJobRequest putJobRequest = new PutJobRequest(job); + + Request request = MLRequestConverters.putJob(putJobRequest); + + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertThat(request.getEndpoint(), equalTo("/_xpack/ml/anomaly_detectors/foo")); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + Job parsedJob = Job.PARSER.apply(parser, null).build(); + assertThat(parsedJob, equalTo(job)); + } + } + + public void testGetJob() { + GetJobRequest getJobRequest = new GetJobRequest(); + + Request request = MLRequestConverters.getJob(getJobRequest); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors", request.getEndpoint()); + assertFalse(request.getParameters().containsKey("allow_no_jobs")); + + getJobRequest = new GetJobRequest("job1", "jobs*"); + getJobRequest.setAllowNoJobs(true); + request = MLRequestConverters.getJob(getJobRequest); + + assertEquals("/_xpack/ml/anomaly_detectors/job1,jobs*", request.getEndpoint()); + assertEquals(Boolean.toString(true), request.getParameters().get("allow_no_jobs")); + } + + public void testOpenJob() throws Exception { + String jobId = "some-job-id"; + OpenJobRequest openJobRequest = new OpenJobRequest(jobId); + openJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); + + Request request = MLRequestConverters.openJob(openJobRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_open", request.getEndpoint()); + assertEquals(requestEntityToString(request), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}"); + } + + public void testCloseJob() throws Exception { + String jobId = "somejobid"; + CloseJobRequest closeJobRequest = new CloseJobRequest(jobId); + + Request request = MLRequestConverters.closeJob(closeJobRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_close", request.getEndpoint()); + assertEquals("{\"job_id\":\"somejobid\"}", requestEntityToString(request)); + + closeJobRequest = new 
CloseJobRequest(jobId, "otherjobs*"); + closeJobRequest.setForce(true); + closeJobRequest.setAllowNoJobs(false); + closeJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); + request = MLRequestConverters.closeJob(closeJobRequest); + + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + ",otherjobs*/_close", request.getEndpoint()); + assertEquals("{\"job_id\":\"somejobid,otherjobs*\",\"timeout\":\"10m\",\"force\":true,\"allow_no_jobs\":false}", + requestEntityToString(request)); + } + + public void testDeleteJob() { + String jobId = randomAlphaOfLength(10); + DeleteJobRequest deleteJobRequest = new DeleteJobRequest(jobId); + + Request request = MLRequestConverters.deleteJob(deleteJobRequest); + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId, request.getEndpoint()); + assertEquals(Boolean.toString(false), request.getParameters().get("force")); + + deleteJobRequest.setForce(true); + request = MLRequestConverters.deleteJob(deleteJobRequest); + assertEquals(Boolean.toString(true), request.getParameters().get("force")); + } + + public void testGetBuckets() throws IOException { + String jobId = randomAlphaOfLength(10); + GetBucketsRequest getBucketsRequest = new GetBucketsRequest(jobId); + getBucketsRequest.setPageParams(new PageParams(100, 300)); + getBucketsRequest.setAnomalyScore(75.0); + getBucketsRequest.setSort("anomaly_score"); + getBucketsRequest.setDescending(true); + + Request request = MLRequestConverters.getBuckets(getBucketsRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/results/buckets", request.getEndpoint()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + GetBucketsRequest parsedRequest = GetBucketsRequest.PARSER.apply(parser, null); + assertThat(parsedRequest, equalTo(getBucketsRequest)); + } + } + + public void testFlushJob() throws Exception { + String jobId = randomAlphaOfLength(10); + FlushJobRequest flushJobRequest = new FlushJobRequest(jobId); + + Request request = MLRequestConverters.flushJob(flushJobRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_flush", request.getEndpoint()); + assertEquals("{\"job_id\":\"" + jobId + "\"}", requestEntityToString(request)); + + flushJobRequest.setSkipTime("1000"); + flushJobRequest.setStart("105"); + flushJobRequest.setEnd("200"); + flushJobRequest.setAdvanceTime("100"); + flushJobRequest.setCalcInterim(true); + request = MLRequestConverters.flushJob(flushJobRequest); + assertEquals( + "{\"job_id\":\"" + jobId + "\",\"calc_interim\":true,\"start\":\"105\"," + + "\"end\":\"200\",\"advance_time\":\"100\",\"skip_time\":\"1000\"}", + requestEntityToString(request)); + } + + public void testGetJobStats() { + GetJobStatsRequest getJobStatsRequestRequest = new GetJobStatsRequest(); + + Request request = MLRequestConverters.getJobStats(getJobStatsRequestRequest); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/_stats", request.getEndpoint()); + assertFalse(request.getParameters().containsKey("allow_no_jobs")); + + getJobStatsRequestRequest = new GetJobStatsRequest("job1", "jobs*"); + getJobStatsRequestRequest.setAllowNoJobs(true); + request = MLRequestConverters.getJobStats(getJobStatsRequestRequest); + + assertEquals("/_xpack/ml/anomaly_detectors/job1,jobs*/_stats", request.getEndpoint()); + 
assertEquals(Boolean.toString(true), request.getParameters().get("allow_no_jobs")); + } + + private static Job createValidJob(String jobId) { + AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList( + Detector.builder().setFunction("count").build())); + Job.Builder jobBuilder = Job.builder(jobId); + jobBuilder.setAnalysisConfig(analysisConfig); + return jobBuilder.build(); + } + + private static String requestEntityToString(Request request) throws Exception { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + request.getEntity().writeTo(bos); + return bos.toString("UTF-8"); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java new file mode 100644 index 0000000000000..6c8ca81cea224 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java @@ -0,0 +1,299 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client; + +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetRecordsRequest; +import org.elasticsearch.client.ml.GetRecordsResponse; +import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.results.AnomalyRecord; +import org.elasticsearch.client.ml.job.results.Bucket; +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.xcontent.XContentType; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase { + + private static final String RESULTS_INDEX = ".ml-anomalies-shared"; + private static final String DOC = "doc"; + + private static final String JOB_ID = "get-results-it-job"; + + // 2018-08-01T00:00:00Z + private static final long START_TIME_EPOCH_MS = 1533081600000L; + + private Stats bucketStats = new Stats(); + private Stats recordStats = new Stats(); + + @Before + public void createJobAndIndexResults() throws IOException { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + Job job = MachineLearningIT.buildJob(JOB_ID); + machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + long time = START_TIME_EPOCH_MS; + long endTime = time + 3600000L * 24 * 10; // 10 days of hourly buckets + while (time < endTime) { + addBucketIndexRequest(time, false, bulkRequest); + addRecordIndexRequests(time, false, bulkRequest); + time += 3600000L; + } + + // Also index an interim bucket + addBucketIndexRequest(time, true, bulkRequest); + addRecordIndexRequest(time, true, bulkRequest); + + highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT); + } + + private void addBucketIndexRequest(long timestamp, boolean isInterim, BulkRequest bulkRequest) { + IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC); + double bucketScore = randomDoubleBetween(0.0, 100.0, true); + bucketStats.report(bucketScore); + indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"bucket\", \"timestamp\": " + timestamp + "," + + "\"bucket_span\": 3600,\"is_interim\": " + isInterim + ", \"anomaly_score\": " + bucketScore + + ", \"bucket_influencers\":[{\"job_id\": \"" + JOB_ID + "\", \"result_type\":\"bucket_influencer\", " + + "\"influencer_field_name\": \"bucket_time\", \"timestamp\": " + timestamp + ", \"bucket_span\": 3600, " + + "\"is_interim\": " + isInterim + "}]}", XContentType.JSON); + bulkRequest.add(indexRequest); + } + + private void addRecordIndexRequests(long timestamp, boolean isInterim, BulkRequest bulkRequest) { + if (randomBoolean()) { + return; + } + int recordCount = randomIntBetween(1, 3); + for (int i = 0; i < recordCount; ++i) { + addRecordIndexRequest(timestamp, isInterim, bulkRequest); + } + } + + 
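The bucket counts asserted below follow directly from this setup: the indexing loop above writes 10 days of hourly buckets (240 finalized buckets) and then one extra interim bucket, giving 241 in total and 240 once interim results are excluded. A minimal sketch of that arithmetic, using the same constants as the test (illustrative only, not part of the patch):

    // Derive the expected bucket counts from the indexing loop above.
    long startMs = 1533081600000L;                        // 2018-08-01T00:00:00Z
    long hourMs = 3600000L;                               // one bucket span
    long endMs = startMs + hourMs * 24 * 10;              // 10 days of hourly buckets
    long finalizedBuckets = (endMs - startMs) / hourMs;   // 240 non-interim buckets
    long totalBuckets = finalizedBuckets + 1;             // plus one interim bucket -> 241
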
private void addRecordIndexRequest(long timestamp, boolean isInterim, BulkRequest bulkRequest) { + IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC); + double recordScore = randomDoubleBetween(0.0, 100.0, true); + recordStats.report(recordScore); + double p = randomDoubleBetween(0.0, 0.05, false); + indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"record\", \"timestamp\": " + timestamp + "," + + "\"bucket_span\": 3600,\"is_interim\": " + isInterim + ", \"record_score\": " + recordScore + ", \"probability\": " + + p + "}", XContentType.JSON); + bulkRequest.add(indexRequest); + } + + @After + public void deleteJob() throws IOException { + new MlRestTestStateCleaner(logger, client()).clearMlMetadata(); + } + + public void testGetBuckets() throws IOException { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(241L)); + assertThat(response.buckets().size(), equalTo(100)); + assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setTimestamp("1533081600000"); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(1L)); + assertThat(response.buckets().size(), equalTo(1)); + assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setAnomalyScore(75.0); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(bucketStats.criticalCount)); + assertThat(response.buckets().size(), equalTo((int) Math.min(100, bucketStats.criticalCount))); + assertThat(response.buckets().stream().anyMatch(b -> b.getAnomalyScore() < 75.0), is(false)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setExcludeInterim(true); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(240L)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setStart("1533081600000"); + request.setEnd("1533092400000"); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(3L)); + assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS)); + assertThat(response.buckets().get(1).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 3600000L)); + assertThat(response.buckets().get(2).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 2 * + 3600000L)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setPageParams(new PageParams(3, 3)); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.buckets().size(), equalTo(3)); + assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 3 * 3600000L)); + 
assertThat(response.buckets().get(1).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 4 * 3600000L)); + assertThat(response.buckets().get(2).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 5 * 3600000L)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setSort("anomaly_score"); + request.setDescending(true); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + double previousScore = 100.0; + for (Bucket bucket : response.buckets()) { + assertThat(bucket.getAnomalyScore(), lessThanOrEqualTo(previousScore)); + previousScore = bucket.getAnomalyScore(); + } + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + // Make sure we get all buckets + request.setPageParams(new PageParams(0, 10000)); + request.setExpand(true); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.buckets().stream().anyMatch(b -> b.getRecords().size() > 0), is(true)); + } + } + + public void testGetRecords() throws IOException { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + { + GetRecordsRequest request = new GetRecordsRequest(JOB_ID); + + GetRecordsResponse response = execute(request, machineLearningClient::getRecords, machineLearningClient::getRecordsAsync); + + assertThat(response.count(), greaterThan(0L)); + assertThat(response.count(), equalTo(recordStats.totalCount())); + } + { + GetRecordsRequest request = new GetRecordsRequest(JOB_ID); + request.setRecordScore(50.0); + + GetRecordsResponse response = execute(request, machineLearningClient::getRecords, machineLearningClient::getRecordsAsync); + + long majorAndCriticalCount = recordStats.majorCount + recordStats.criticalCount; + assertThat(response.count(), equalTo(majorAndCriticalCount)); + assertThat(response.records().size(), equalTo((int) Math.min(100, majorAndCriticalCount))); + assertThat(response.records().stream().anyMatch(r -> r.getRecordScore() < 50.0), is(false)); + } + { + GetRecordsRequest request = new GetRecordsRequest(JOB_ID); + request.setExcludeInterim(true); + + GetRecordsResponse response = execute(request, machineLearningClient::getRecords, machineLearningClient::getRecordsAsync); + + assertThat(response.count(), equalTo(recordStats.totalCount() - 1)); + } + { + long end = START_TIME_EPOCH_MS + 10 * 3600000; + GetRecordsRequest request = new GetRecordsRequest(JOB_ID); + request.setStart(String.valueOf(START_TIME_EPOCH_MS)); + request.setEnd(String.valueOf(end)); + + GetRecordsResponse response = execute(request, machineLearningClient::getRecords, machineLearningClient::getRecordsAsync); + + for (AnomalyRecord record : response.records()) { + assertThat(record.getTimestamp().getTime(), greaterThanOrEqualTo(START_TIME_EPOCH_MS)); + assertThat(record.getTimestamp().getTime(), lessThan(end)); + } + } + { + GetRecordsRequest request = new GetRecordsRequest(JOB_ID); + request.setPageParams(new PageParams(3, 3)); + + GetRecordsResponse response = execute(request, machineLearningClient::getRecords, machineLearningClient::getRecordsAsync); + + assertThat(response.records().size(), equalTo(3)); + } + { + GetRecordsRequest request = new GetRecordsRequest(JOB_ID); + request.setSort("probability"); + request.setDescending(true); + + GetRecordsResponse response = execute(request, machineLearningClient::getRecords, machineLearningClient::getRecordsAsync); + + double previousProb = 
1.0; + for (AnomalyRecord record : response.records()) { + assertThat(record.getProbability(), lessThanOrEqualTo(previousProb)); + previousProb = record.getProbability(); + } + } + } + + private static class Stats { + // score < 50.0 + private long minorCount; + + // score < 75.0 + private long majorCount; + + // score > 75.0 + private long criticalCount; + + private void report(double score) { + if (score < 50.0) { + minorCount++; + } else if (score < 75.0) { + majorCount++; + } else { + criticalCount++; + } + } + + private long totalCount() { + return minorCount + majorCount + criticalCount; + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 94e73a14c188c..cd4b6ffc7691f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -19,23 +19,49 @@ package org.elasticsearch.client; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; -import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; -import org.elasticsearch.protocol.xpack.ml.PutJobRequest; -import org.elasticsearch.protocol.xpack.ml.PutJobResponse; -import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig; -import org.elasticsearch.protocol.xpack.ml.job.config.DataDescription; -import org.elasticsearch.protocol.xpack.ml.job.config.Detector; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetJobStatsResponse; +import org.elasticsearch.client.ml.job.config.JobState; +import org.elasticsearch.client.ml.job.stats.JobStats; +import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteJobRequest; +import org.elasticsearch.client.ml.DeleteJobResponse; +import org.elasticsearch.client.ml.GetJobRequest; +import org.elasticsearch.client.ml.GetJobResponse; +import org.elasticsearch.client.ml.OpenJobRequest; +import org.elasticsearch.client.ml.OpenJobResponse; +import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.job.config.AnalysisConfig; +import org.elasticsearch.client.ml.job.config.DataDescription; +import org.elasticsearch.client.ml.job.config.Detector; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.client.ml.FlushJobRequest; +import org.elasticsearch.client.ml.FlushJobResponse; +import org.junit.After; +import java.io.IOException; import java.util.Arrays; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; public class MachineLearningIT extends ESRestHighLevelClientTestCase { + @After + public void cleanUp() throws IOException { + new MlRestTestStateCleaner(logger, client()).clearMlMetadata(); + } + public void testPutJob() throws Exception { String 
jobId = randomValidJobId(); Job job = buildJob(jobId); @@ -48,6 +74,54 @@ public void testPutJob() throws Exception { assertThat(createdJob.getJobType(), is(Job.ANOMALY_DETECTOR_JOB_TYPE)); } + public void testGetJob() throws Exception { + String jobId1 = randomValidJobId(); + String jobId2 = randomValidJobId(); + + Job job1 = buildJob(jobId1); + Job job2 = buildJob(jobId2); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job1), RequestOptions.DEFAULT); + machineLearningClient.putJob(new PutJobRequest(job2), RequestOptions.DEFAULT); + + GetJobRequest request = new GetJobRequest(jobId1, jobId2); + + // Test getting specific jobs + GetJobResponse response = execute(request, machineLearningClient::getJob, machineLearningClient::getJobAsync); + + assertEquals(2, response.count()); + assertThat(response.jobs(), hasSize(2)); + assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), containsInAnyOrder(jobId1, jobId2)); + + // Test getting all jobs explicitly + request = GetJobRequest.getAllJobsRequest(); + response = execute(request, machineLearningClient::getJob, machineLearningClient::getJobAsync); + + assertTrue(response.count() >= 2L); + assertTrue(response.jobs().size() >= 2L); + assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + + // Test getting all jobs implicitly + response = execute(new GetJobRequest(), machineLearningClient::getJob, machineLearningClient::getJobAsync); + + assertTrue(response.count() >= 2L); + assertTrue(response.jobs().size() >= 2L); + assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + } + + public void testDeleteJob() throws Exception { + String jobId = randomValidJobId(); + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + DeleteJobResponse response = execute(new DeleteJobRequest(jobId), + machineLearningClient::deleteJob, + machineLearningClient::deleteJobAsync); + + assertTrue(response.isAcknowledged()); + } + public void testOpenJob() throws Exception { String jobId = randomValidJobId(); Job job = buildJob(jobId); @@ -60,6 +134,90 @@ public void testOpenJob() throws Exception { assertTrue(response.isOpened()); } + public void testCloseJob() throws Exception { + String jobId = randomValidJobId(); + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT); + + CloseJobResponse response = execute(new CloseJobRequest(jobId), + machineLearningClient::closeJob, + machineLearningClient::closeJobAsync); + assertTrue(response.isClosed()); + } + + public void testFlushJob() throws Exception { + String jobId = randomValidJobId(); + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT); + + FlushJobResponse response = execute(new FlushJobRequest(jobId), + machineLearningClient::flushJob, + machineLearningClient::flushJobAsync); + assertTrue(response.isFlushed()); + } + + 
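The job-lifecycle tests above all follow the same pattern against the high-level client: put the job, open it, then issue the lifecycle call (flush, close, delete) and assert on the single boolean flag carried by each response. A minimal sketch of that flow, assuming an already-built Job and a MachineLearningClient obtained via highLevelClient().machineLearning() as in these tests; the helper name runJobLifecycle is purely illustrative:

static void runJobLifecycle(MachineLearningClient ml, Job job) throws IOException {
    String jobId = job.getId();
    ml.putJob(new PutJobRequest(job), RequestOptions.DEFAULT);      // create the job
    ml.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT);  // open it for data

    // Flush buffered data, then close; each response exposes a simple flag,
    // mirroring the assertions in testFlushJob and testCloseJob above.
    FlushJobResponse flushed = ml.flushJob(new FlushJobRequest(jobId), RequestOptions.DEFAULT);
    CloseJobResponse closed = ml.closeJob(new CloseJobRequest(jobId), RequestOptions.DEFAULT);

    // Finally remove the job, as testDeleteJob does.
    DeleteJobResponse deleted = ml.deleteJob(new DeleteJobRequest(jobId), RequestOptions.DEFAULT);
    assert flushed.isFlushed() && closed.isClosed() && deleted.isAcknowledged();
}
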
public void testGetJobStats() throws Exception { + String jobId1 = "ml-get-job-stats-test-id-1"; + String jobId2 = "ml-get-job-stats-test-id-2"; + + Job job1 = buildJob(jobId1); + Job job2 = buildJob(jobId2); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job1), RequestOptions.DEFAULT); + machineLearningClient.putJob(new PutJobRequest(job2), RequestOptions.DEFAULT); + + machineLearningClient.openJob(new OpenJobRequest(jobId1), RequestOptions.DEFAULT); + + GetJobStatsRequest request = new GetJobStatsRequest(jobId1, jobId2); + + // Test getting specific + GetJobStatsResponse response = execute(request, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync); + + assertEquals(2, response.count()); + assertThat(response.jobStats(), hasSize(2)); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), containsInAnyOrder(jobId1, jobId2)); + for (JobStats stats : response.jobStats()) { + if (stats.getJobId().equals(jobId1)) { + assertEquals(JobState.OPENED, stats.getState()); + } else { + assertEquals(JobState.CLOSED, stats.getState()); + } + } + + // Test getting all explicitly + request = GetJobStatsRequest.getAllJobStatsRequest(); + response = execute(request, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync); + + assertTrue(response.count() >= 2L); + assertTrue(response.jobStats().size() >= 2L); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + + // Test getting all implicitly + response = execute(new GetJobStatsRequest(), machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync); + + assertTrue(response.count() >= 2L); + assertTrue(response.jobStats().size() >= 2L); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + + // Test getting all with wildcard + request = new GetJobStatsRequest("ml-get-job-stats-test-id-*"); + response = execute(request, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync); + assertTrue(response.count() >= 2L); + assertTrue(response.jobStats().size() >= 2L); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + + // Test when allow_no_jobs is false + final GetJobStatsRequest erroredRequest = new GetJobStatsRequest("jobs-that-do-not-exist*"); + erroredRequest.setAllowNoJobs(false); + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, + () -> execute(erroredRequest, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync)); + assertThat(exception.status().getStatus(), equalTo(404)); + } + public static String randomValidJobId() { CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray()); return generator.ofCodePointsLength(random(), 10, 10); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MlRestTestStateCleaner.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MlRestTestStateCleaner.java new file mode 100644 index 0000000000000..7ad86576245ef --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MlRestTestStateCleaner.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * This is temporarily duplicated from the server side. + * @TODO Replace with an implementation using the HLRC once + * the APIs for managing datafeeds are implemented. + */ +public class MlRestTestStateCleaner { + + private final Logger logger; + private final RestClient adminClient; + + public MlRestTestStateCleaner(Logger logger, RestClient adminClient) { + this.logger = logger; + this.adminClient = adminClient; + } + + public void clearMlMetadata() throws IOException { + deleteAllDatafeeds(); + deleteAllJobs(); + // indices will be deleted by the ESRestTestCase class + } + + @SuppressWarnings("unchecked") + private void deleteAllDatafeeds() throws IOException { + final Request datafeedsRequest = new Request("GET", "/_xpack/ml/datafeeds"); + datafeedsRequest.addParameter("filter_path", "datafeeds"); + final Response datafeedsResponse = adminClient.performRequest(datafeedsRequest); + final List> datafeeds = + (List>) XContentMapValues.extractValue("datafeeds", ESRestTestCase.entityAsMap(datafeedsResponse)); + if (datafeeds == null) { + return; + } + + try { + adminClient.performRequest(new Request("POST", "/_xpack/ml/datafeeds/_all/_stop")); + } catch (Exception e1) { + logger.warn("failed to stop all datafeeds. Forcing stop", e1); + try { + adminClient.performRequest(new Request("POST", "/_xpack/ml/datafeeds/_all/_stop?force=true")); + } catch (Exception e2) { + logger.warn("Force-closing all data feeds failed", e2); + } + throw new RuntimeException( + "Had to resort to force-stopping datafeeds, something went wrong?", e1); + } + + for (Map datafeed : datafeeds) { + String datafeedId = (String) datafeed.get("datafeed_id"); + adminClient.performRequest(new Request("DELETE", "/_xpack/ml/datafeeds/" + datafeedId)); + } + } + + private void deleteAllJobs() throws IOException { + final Request jobsRequest = new Request("GET", "/_xpack/ml/anomaly_detectors"); + jobsRequest.addParameter("filter_path", "jobs"); + final Response response = adminClient.performRequest(jobsRequest); + @SuppressWarnings("unchecked") + final List> jobConfigs = + (List>) XContentMapValues.extractValue("jobs", ESRestTestCase.entityAsMap(response)); + if (jobConfigs == null) { + return; + } + + try { + adminClient.performRequest(new Request("POST", "/_xpack/ml/anomaly_detectors/_all/_close")); + } catch (Exception e1) { + logger.warn("failed to close all jobs. 
Forcing closed", e1); + try { + adminClient.performRequest(new Request("POST", "/_xpack/ml/anomaly_detectors/_all/_close?force=true")); + } catch (Exception e2) { + logger.warn("Force-closing all jobs failed", e2); + } + throw new RuntimeException("Had to resort to force-closing jobs, something went wrong?", + e1); + } + + for (Map jobConfig : jobConfigs) { + String jobId = (String) jobConfig.get("job_id"); + adminClient.performRequest(new Request("DELETE", "/_xpack/ml/anomaly_detectors/" + jobId)); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 1c9707e0e27fa..92930d14cf4a5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -116,8 +116,10 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.rankeval.PrecisionAtK; @@ -125,13 +127,18 @@ import org.elasticsearch.index.rankeval.RankEvalSpec; import org.elasticsearch.index.rankeval.RatedRequest; import org.elasticsearch.index.rankeval.RestRankEvalAction; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.RemoteInfo; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.Hop; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.mustache.MultiSearchTemplateRequest; import org.elasticsearch.script.mustache.SearchTemplateRequest; @@ -170,6 +177,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE; import static org.elasticsearch.client.RequestConverters.enforceSameContentType; @@ -177,6 +185,7 @@ import static org.elasticsearch.index.RandomCreateIndexGenerator.randomCreateIndexRequest; import static org.elasticsearch.index.RandomCreateIndexGenerator.randomIndexSettings; import static org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomAliasAction; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.hamcrest.CoreMatchers.equalTo; @@ -405,6 +414,118 @@ public void testUpdateAliases() throws IOException { assertToXContentBody(indicesAliasesRequest, request.getEntity()); } + public void testReindex() throws IOException { + ReindexRequest reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices("source_idx"); + reindexRequest.setDestIndex("dest_idx"); + Map expectedParams = new HashMap<>(); + if (randomBoolean()) { + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + RemoteInfo remoteInfo = new RemoteInfo("http", "remote-host", 9200, null, + BytesReference.bytes(matchAllQuery().toXContent(builder, ToXContent.EMPTY_PARAMS)), + "user", + "pass", + emptyMap(), + RemoteInfo.DEFAULT_SOCKET_TIMEOUT, + RemoteInfo.DEFAULT_CONNECT_TIMEOUT + ); + reindexRequest.setRemoteInfo(remoteInfo); + } + if (randomBoolean()) { + reindexRequest.setSourceDocTypes("doc", "tweet"); + } + if (randomBoolean()) { + reindexRequest.setSourceBatchSize(randomInt(100)); + } + if (randomBoolean()) { + reindexRequest.setDestDocType("tweet_and_doc"); + } + if (randomBoolean()) { + reindexRequest.setDestOpType("create"); + } + if (randomBoolean()) { + reindexRequest.setDestPipeline("my_pipeline"); + } + if (randomBoolean()) { + reindexRequest.setDestRouting("=cat"); + } + if (randomBoolean()) { + reindexRequest.setSize(randomIntBetween(100, 1000)); + } + if (randomBoolean()) { + reindexRequest.setAbortOnVersionConflict(false); + } + if (randomBoolean()) { + String ts = randomTimeValue(); + reindexRequest.setScroll(TimeValue.parseTimeValue(ts, "scroll")); + } + if (reindexRequest.getRemoteInfo() == null && randomBoolean()) { + reindexRequest.setSourceQuery(new TermQueryBuilder("foo", "fooval")); + } + setRandomTimeout(reindexRequest::setTimeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams); + setRandomWaitForActiveShards(reindexRequest::setWaitForActiveShards, ActiveShardCount.DEFAULT, expectedParams); + expectedParams.put("scroll", reindexRequest.getScrollTime().getStringRep()); + Request request = RequestConverters.reindex(reindexRequest); + assertEquals("/_reindex", request.getEndpoint()); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(reindexRequest, request.getEntity()); + } + + public void testUpdateByQuery() throws IOException { + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(randomIndicesNames(1, 5)); + Map expectedParams = new HashMap<>(); + if (randomBoolean()) { + updateByQueryRequest.setDocTypes(generateRandomStringArray(5, 5, false, false)); + } + if (randomBoolean()) { + int batchSize = randomInt(100); + updateByQueryRequest.setBatchSize(batchSize); + expectedParams.put("scroll_size", Integer.toString(batchSize)); + } + if (randomBoolean()) { + updateByQueryRequest.setPipeline("my_pipeline"); + expectedParams.put("pipeline", "my_pipeline"); + } + if (randomBoolean()) { + updateByQueryRequest.setRouting("=cat"); + expectedParams.put("routing", "=cat"); + } + if (randomBoolean()) { + int size = randomIntBetween(100, 1000); + updateByQueryRequest.setSize(size); + expectedParams.put("size", Integer.toString(size)); + } + if (randomBoolean()) { + updateByQueryRequest.setAbortOnVersionConflict(false); + expectedParams.put("conflicts", "proceed"); + } + if (randomBoolean()) { + String ts = randomTimeValue(); + 
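// The scroll keep-alive travels as the "scroll" query parameter rather than in the
// request body, so the raw time value string chosen here is exactly what the
// converter is expected to place in the outgoing request parameters.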
updateByQueryRequest.setScroll(TimeValue.parseTimeValue(ts, "scroll")); + expectedParams.put("scroll", ts); + } + if (randomBoolean()) { + updateByQueryRequest.setQuery(new TermQueryBuilder("foo", "fooval")); + } + if (randomBoolean()) { + updateByQueryRequest.setScript(new Script("ctx._source.last = \"lastname\"")); + } + setRandomIndicesOptions(updateByQueryRequest::setIndicesOptions, updateByQueryRequest::indicesOptions, expectedParams); + setRandomTimeout(updateByQueryRequest::setTimeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams); + Request request = RequestConverters.updateByQuery(updateByQueryRequest); + StringJoiner joiner = new StringJoiner("/", "/", ""); + joiner.add(String.join(",", updateByQueryRequest.indices())); + if (updateByQueryRequest.getDocTypes().length > 0) + joiner.add(String.join(",", updateByQueryRequest.getDocTypes())); + joiner.add("_update_by_query"); + assertEquals(joiner.toString(), request.getEndpoint()); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(updateByQueryRequest, request.getEntity()); + } + public void testPutMapping() throws IOException { PutMappingRequest putMappingRequest = new PutMappingRequest(); @@ -2599,6 +2720,35 @@ public void testXPackPutWatch() throws Exception { request.getEntity().writeTo(bos); assertThat(bos.toString("UTF-8"), is(body)); } + + public void testGraphExplore() throws Exception { + Map expectedParams = new HashMap<>(); + + GraphExploreRequest graphExploreRequest = new GraphExploreRequest(); + graphExploreRequest.sampleDiversityField("diversity"); + graphExploreRequest.indices("index1", "index2"); + graphExploreRequest.types("type1", "type2"); + int timeout = randomIntBetween(10000, 20000); + graphExploreRequest.timeout(TimeValue.timeValueMillis(timeout)); + graphExploreRequest.useSignificance(randomBoolean()); + int numHops = randomIntBetween(1, 5); + for (int i = 0; i < numHops; i++) { + int hopNumber = i + 1; + QueryBuilder guidingQuery = null; + if (randomBoolean()) { + guidingQuery = new TermQueryBuilder("field" + hopNumber, "value" + hopNumber); + } + Hop hop = graphExploreRequest.createNextHop(guidingQuery); + hop.addVertexRequest("field" + hopNumber); + hop.getVertexRequest(0).addInclude("value" + hopNumber, hopNumber); + } + Request request = RequestConverters.xPackGraphExplore(graphExploreRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/index1,index2/type1,type2/_xpack/graph/_explore", request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertThat(request.getEntity().getContentType().getValue(), is(XContentType.JSON.mediaTypeWithoutParameters())); + assertToXContentBody(graphExploreRequest, request.getEntity()); + } public void testXPackDeleteWatch() { DeleteWatchRequest deleteWatchRequest = new DeleteWatchRequest(); @@ -2611,19 +2761,6 @@ public void testXPackDeleteWatch() { assertThat(request.getEntity(), nullValue()); } - public void testPostMachineLearningOpenJob() throws Exception { - String jobId = "some-job-id"; - OpenJobRequest openJobRequest = new OpenJobRequest(jobId); - openJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); - - Request request = RequestConverters.machineLearningOpenJob(openJobRequest); - assertEquals(HttpPost.METHOD_NAME, request.getMethod()); - assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_open", request.getEndpoint()); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - 
request.getEntity().writeTo(bos); - assertEquals(bos.toString("UTF-8"), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}"); - } - /** * Randomize the {@link FetchSourceContext} request parameters. */ diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index b5d8dbb628eb9..7df4e07257680 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -660,13 +660,11 @@ public void testApiNamingConventions() throws Exception { "indices.put_alias", "mtermvectors", "put_script", - "reindex", "reindex_rethrottle", "render_search_template", "scripts_painless_execute", "tasks.get", - "termvectors", - "update_by_query" + "termvectors" }; //These API are not required for high-level client feature completeness String[] notRequiredApi = new String[] { @@ -685,6 +683,7 @@ public void testApiNamingConventions() throws Exception { "nodes.stats", "nodes.hot_threads", "nodes.usage", + "nodes.reload_secure_settings", "search_shards", }; Set deprecatedMethods = new HashSet<>(); @@ -758,6 +757,7 @@ public void testApiNamingConventions() throws Exception { apiName.startsWith("license.") == false && apiName.startsWith("machine_learning.") == false && apiName.startsWith("watcher.") == false && + apiName.startsWith("graph.") == false && apiName.startsWith("migration.") == false) { apiNotFound.add(apiName); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 9c9c5425f0006..739a590ba5f64 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -256,7 +256,7 @@ public void testSearchWithTermsAgg() throws IOException { assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); Terms termsAgg = searchResponse.getAggregations().get("agg1"); assertEquals("agg1", termsAgg.getName()); assertEquals(2, termsAgg.getBuckets().size()); @@ -293,7 +293,7 @@ public void testSearchWithRangeAgg() throws IOException { assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(5, searchResponse.getHits().totalHits); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); Range rangeAgg = searchResponse.getAggregations().get("agg1"); assertEquals("agg1", rangeAgg.getName()); assertEquals(2, rangeAgg.getBuckets().size()); @@ -323,7 +323,7 @@ public void testSearchWithTermsAndRangeAgg() throws IOException { assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); Terms termsAgg = searchResponse.getAggregations().get("agg1"); assertEquals("agg1", 
termsAgg.getName()); assertEquals(2, termsAgg.getBuckets().size()); @@ -375,7 +375,7 @@ public void testSearchWithMatrixStats() throws IOException { assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(5, searchResponse.getHits().totalHits); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); assertEquals(1, searchResponse.getAggregations().asList().size()); MatrixStats matrixStats = searchResponse.getAggregations().get("agg1"); assertEquals(5, matrixStats.getFieldCount("num")); @@ -474,7 +474,7 @@ public void testSearchWithParentJoin() throws IOException { assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(3, searchResponse.getHits().totalHits); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); assertEquals(1, searchResponse.getAggregations().asList().size()); Terms terms = searchResponse.getAggregations().get("top-tags"); assertEquals(0, terms.getDocCountError()); @@ -513,7 +513,7 @@ public void testSearchWithSuggest() throws IOException { assertNull(searchResponse.getAggregations()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(0, searchResponse.getHits().totalHits); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); assertEquals(0, searchResponse.getHits().getHits().length); assertEquals(1, searchResponse.getSuggest().size()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/StoredScriptsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/StoredScriptsIT.java index 14734c4ab60a3..1d693eee8396e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/StoredScriptsIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/StoredScriptsIT.java @@ -17,9 +17,6 @@ * under the License. 
*/ - -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; @@ -35,7 +32,6 @@ import java.util.Collections; -import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -52,12 +48,9 @@ public void testGetStoredScript() throws Exception { final String script = Strings.toString(scriptSource.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS)); // TODO: change to HighLevel PutStoredScriptRequest when it will be ready // so far - using low-level REST API - Response putResponse = - adminClient() - .performRequest("PUT", "/_scripts/calculate-score", emptyMap(), - new StringEntity("{\"script\":" + script + "}", - ContentType.APPLICATION_JSON)); - assertEquals(putResponse.getStatusLine().getReasonPhrase(), 200, putResponse.getStatusLine().getStatusCode()); + Request putRequest = new Request("PUT", "/_scripts/calculate-score"); + putRequest.setJsonEntity("{\"script\":" + script + "}"); + Response putResponse = adminClient().performRequest(putRequest); assertEquals("{\"acknowledged\":true}", EntityUtils.toString(putResponse.getEntity())); GetStoredScriptRequest getRequest = new GetStoredScriptRequest("calculate-score"); @@ -78,12 +71,9 @@ public void testDeleteStoredScript() throws Exception { final String script = Strings.toString(scriptSource.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS)); // TODO: change to HighLevel PutStoredScriptRequest when it will be ready // so far - using low-level REST API - Response putResponse = - adminClient() - .performRequest("PUT", "/_scripts/" + id, emptyMap(), - new StringEntity("{\"script\":" + script + "}", - ContentType.APPLICATION_JSON)); - assertEquals(putResponse.getStatusLine().getReasonPhrase(), 200, putResponse.getStatusLine().getStatusCode()); + Request putRequest = new Request("PUT", "/_scripts/" + id); + putRequest.setJsonEntity("{\"script\":" + script + "}"); + Response putResponse = adminClient().performRequest(putRequest); assertEquals("{\"acknowledged\":true}", EntityUtils.toString(putResponse.getEntity())); DeleteStoredScriptRequest deleteRequest = new DeleteStoredScriptRequest(id); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index ad41c139ddc37..ac9d42c65ca59 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -50,6 +51,8 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -59,13 +62,23 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.RemoteInfo; +import org.elasticsearch.index.reindex.ScrollableHitSource; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.sort.SortOrder; +import java.util.Collections; import java.util.Date; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -750,6 +763,263 @@ public void onFailure(Exception e) { } } + public void testReindex() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + String mapping = + "\"doc\": {\n" + + " \"properties\": {\n" + + " \"user\": {\n" + + " \"type\": \"text\"\n" + + " },\n" + + " \"field1\": {\n" + + " \"type\": \"integer\"\n" + + " },\n" + + " \"field2\": {\n" + + " \"type\": \"integer\"\n" + + " }\n" + + " }\n" + + " }"; + createIndex("source1", Settings.EMPTY, mapping); + createIndex("source2", Settings.EMPTY, mapping); + createPipeline("my_pipeline"); + } + { + // tag::reindex-request + ReindexRequest request = new ReindexRequest(); // <1> + request.setSourceIndices("source1", "source2"); // <2> + request.setDestIndex("dest"); // <3> + // end::reindex-request + // tag::reindex-request-versionType + request.setDestVersionType(VersionType.EXTERNAL); // <1> + // end::reindex-request-versionType + // tag::reindex-request-opType + request.setDestOpType("create"); // <1> + // end::reindex-request-opType + // tag::reindex-request-conflicts + request.setConflicts("proceed"); // <1> + // end::reindex-request-conflicts + // tag::reindex-request-typeOrQuery + request.setSourceDocTypes("doc"); // <1> + request.setSourceQuery(new TermQueryBuilder("user", "kimchy")); // <2> + // end::reindex-request-typeOrQuery + // tag::reindex-request-size + request.setSize(10); // <1> + // end::reindex-request-size + // tag::reindex-request-sourceSize + request.setSourceBatchSize(100); // <1> + // end::reindex-request-sourceSize + // tag::reindex-request-pipeline + request.setDestPipeline("my_pipeline"); // <1> + // end::reindex-request-pipeline + // tag::reindex-request-sort + request.addSortField("field1", SortOrder.DESC); // <1> + request.addSortField("field2", SortOrder.ASC); // <2> + // end::reindex-request-sort + // tag::reindex-request-script + request.setScript( + new Script( + ScriptType.INLINE, "painless", + "if (ctx._source.user == 'kimchy') {ctx._source.likes++;}", + Collections.emptyMap())); // <1> + // end::reindex-request-script + // tag::reindex-request-remote + request.setRemoteInfo( + new RemoteInfo( + "https", "localhost", 9002, null, new BytesArray(new MatchAllQueryBuilder().toString()), + "user", "pass", Collections.emptyMap(), new TimeValue(100, TimeUnit.MILLISECONDS), + new 
TimeValue(100, TimeUnit.SECONDS) + ) + ); // <1> + // end::reindex-request-remote + request.setRemoteInfo(null); // Remove it for tests + // tag::reindex-request-timeout + request.setTimeout(TimeValue.timeValueMinutes(2)); // <1> + // end::reindex-request-timeout + // tag::reindex-request-refresh + request.setRefresh(true); // <1> + // end::reindex-request-refresh + // tag::reindex-request-slices + request.setSlices(2); // <1> + // end::reindex-request-slices + // tag::reindex-request-scroll + request.setScroll(TimeValue.timeValueMinutes(10)); // <1> + // end::reindex-request-scroll + + + // tag::reindex-execute + BulkByScrollResponse bulkResponse = client.reindex(request, RequestOptions.DEFAULT); + // end::reindex-execute + assertSame(0, bulkResponse.getSearchFailures().size()); + assertSame(0, bulkResponse.getBulkFailures().size()); + // tag::reindex-response + TimeValue timeTaken = bulkResponse.getTook(); // <1> + boolean timedOut = bulkResponse.isTimedOut(); // <2> + long totalDocs = bulkResponse.getTotal(); // <3> + long updatedDocs = bulkResponse.getUpdated(); // <4> + long createdDocs = bulkResponse.getCreated(); // <5> + long deletedDocs = bulkResponse.getDeleted(); // <6> + long batches = bulkResponse.getBatches(); // <7> + long noops = bulkResponse.getNoops(); // <8> + long versionConflicts = bulkResponse.getVersionConflicts(); // <9> + long bulkRetries = bulkResponse.getBulkRetries(); // <10> + long searchRetries = bulkResponse.getSearchRetries(); // <11> + TimeValue throttledMillis = bulkResponse.getStatus().getThrottled(); // <12> + TimeValue throttledUntilMillis = bulkResponse.getStatus().getThrottledUntil(); // <13> + List searchFailures = bulkResponse.getSearchFailures(); // <14> + List bulkFailures = bulkResponse.getBulkFailures(); // <15> + // end::reindex-response + } + { + ReindexRequest request = new ReindexRequest(); + request.setSourceIndices("source1"); + request.setDestIndex("dest"); + + // tag::reindex-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(BulkByScrollResponse bulkResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::reindex-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::reindex-execute-async + client.reindexAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::reindex-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testUpdateByQuery() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + String mapping = + "\"doc\": {\n" + + " \"properties\": {\n" + + " \"user\": {\n" + + " \"type\": \"text\"\n" + + " },\n" + + " \"field1\": {\n" + + " \"type\": \"integer\"\n" + + " },\n" + + " \"field2\": {\n" + + " \"type\": \"integer\"\n" + + " }\n" + + " }\n" + + " }"; + createIndex("source1", Settings.EMPTY, mapping); + createIndex("source2", Settings.EMPTY, mapping); + createPipeline("my_pipeline"); + } + { + // tag::update-by-query-request + UpdateByQueryRequest request = new UpdateByQueryRequest("source1", "source2"); // <1> + // end::update-by-query-request + // tag::update-by-query-request-conflicts + request.setConflicts("proceed"); // <1> + // end::update-by-query-request-conflicts + // tag::update-by-query-request-typeOrQuery + request.setDocTypes("doc"); // <1> + request.setQuery(new TermQueryBuilder("user", 
"kimchy")); // <2> + // end::update-by-query-request-typeOrQuery + // tag::update-by-query-request-size + request.setSize(10); // <1> + // end::update-by-query-request-size + // tag::update-by-query-request-scrollSize + request.setBatchSize(100); // <1> + // end::update-by-query-request-scrollSize + // tag::update-by-query-request-pipeline + request.setPipeline("my_pipeline"); // <1> + // end::update-by-query-request-pipeline + // tag::update-by-query-request-script + request.setScript( + new Script( + ScriptType.INLINE, "painless", + "if (ctx._source.user == 'kimchy') {ctx._source.likes++;}", + Collections.emptyMap())); // <1> + // end::update-by-query-request-script + // tag::update-by-query-request-timeout + request.setTimeout(TimeValue.timeValueMinutes(2)); // <1> + // end::update-by-query-request-timeout + // tag::update-by-query-request-refresh + request.setRefresh(true); // <1> + // end::update-by-query-request-refresh + // tag::update-by-query-request-slices + request.setSlices(2); // <1> + // end::update-by-query-request-slices + // tag::update-by-query-request-scroll + request.setScroll(TimeValue.timeValueMinutes(10)); // <1> + // end::update-by-query-request-scroll + // tag::update-by-query-request-routing + request.setRouting("=cat"); // <1> + // end::update-by-query-request-routing + // tag::update-by-query-request-indicesOptions + request.setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); // <1> + // end::update-by-query-request-indicesOptions + + // tag::update-by-query-execute + BulkByScrollResponse bulkResponse = client.updateByQuery(request, RequestOptions.DEFAULT); + // end::update-by-query-execute + assertSame(0, bulkResponse.getSearchFailures().size()); + assertSame(0, bulkResponse.getBulkFailures().size()); + // tag::update-by-query-response + TimeValue timeTaken = bulkResponse.getTook(); // <1> + boolean timedOut = bulkResponse.isTimedOut(); // <2> + long totalDocs = bulkResponse.getTotal(); // <3> + long updatedDocs = bulkResponse.getUpdated(); // <4> + long deletedDocs = bulkResponse.getDeleted(); // <5> + long batches = bulkResponse.getBatches(); // <6> + long noops = bulkResponse.getNoops(); // <7> + long versionConflicts = bulkResponse.getVersionConflicts(); // <8> + long bulkRetries = bulkResponse.getBulkRetries(); // <9> + long searchRetries = bulkResponse.getSearchRetries(); // <10> + TimeValue throttledMillis = bulkResponse.getStatus().getThrottled(); // <11> + TimeValue throttledUntilMillis = bulkResponse.getStatus().getThrottledUntil(); // <12> + List searchFailures = bulkResponse.getSearchFailures(); // <13> + List bulkFailures = bulkResponse.getBulkFailures(); // <14> + // end::update-by-query-response + } + { + UpdateByQueryRequest request = new UpdateByQueryRequest(); + request.indices("source1"); + + // tag::update-by-query-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(BulkByScrollResponse bulkResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::update-by-query-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::update-by-query-execute-async + client.updateByQueryAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::update-by-query-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testGet() throws Exception { RestHighLevelClient client 
= highLevelClient(); { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java new file mode 100644 index 0000000000000..8631e18b8739b --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.documentation; + +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.protocol.xpack.graph.Connection; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.protocol.xpack.graph.Vertex; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; + +public class GraphDocumentationIT extends ESRestHighLevelClientTestCase { + + + @Before + public void indexDocuments() throws IOException { + // Create chain of doc IDs across indices 1->2->3 + Request doc1 = new Request(HttpPut.METHOD_NAME, "/index1/type/1"); + doc1.setJsonEntity("{ \"participants\":[1,2], \"text\":\"let's start projectx\", \"attachment_md5\":\"324FHDGHFDG4564\"}"); + client().performRequest(doc1); + + Request doc2 = new Request(HttpPut.METHOD_NAME, "/index2/type/2"); + doc2.setJsonEntity("{\"participants\":[2,3,4], \"text\":\"got something you both may be interested in\"}"); + client().performRequest(doc2); + + client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh")); + } + + @SuppressForbidden(reason = "system out is ok for a documentation example") + public void testExplore() throws Exception { + RestHighLevelClient client = highLevelClient(); + + + + // tag::x-pack-graph-explore-request + GraphExploreRequest request = new GraphExploreRequest(); + request.indices("index1", "index2"); + request.useSignificance(false); + TermQueryBuilder startingQuery = new TermQueryBuilder("text", "projectx"); + + Hop hop1 = request.createNextHop(startingQuery); // <1> + VertexRequest people = hop1.addVertexRequest("participants"); // <2> + people.minDocCount(1); + VertexRequest files = hop1.addVertexRequest("attachment_md5"); + files.minDocCount(1); + 
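// Each hop is one wave of exploration: the first hop finds "participants" and
// "attachment_md5" terms connected to the seed query, and the next hop (below)
// expands from those vertices to further "participants". minDocCount(1) keeps
// even single-document associations, which suits the tiny corpus indexed in setup.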
+ Hop hop2 = request.createNextHop(null); // <3> + VertexRequest vr2 = hop2.addVertexRequest("participants"); + vr2.minDocCount(5); + + GraphExploreResponse exploreResponse = client.graph().explore(request, RequestOptions.DEFAULT); // <4> + // end::x-pack-graph-explore-request + + + // tag::x-pack-graph-explore-response + Collection v = exploreResponse.getVertices(); + Collection c = exploreResponse.getConnections(); + for (Vertex vertex : v) { + System.out.println(vertex.getField() + ":" + vertex.getTerm() + // <1> + " discovered at hop depth " + vertex.getHopDepth()); + } + for (Connection link : c) { + System.out.println(link.getFrom() + " -> " + link.getTo() // <2> + + " evidenced by " + link.getDocCount() + " docs"); + } + // end::x-pack-graph-explore-response + + + Collection initialVertices = exploreResponse.getVertices(); + + // tag::x-pack-graph-explore-expand + GraphExploreRequest expandRequest = new GraphExploreRequest(); + expandRequest.indices("index1", "index2"); + + + Hop expandHop1 = expandRequest.createNextHop(null); // <1> + VertexRequest fromPeople = expandHop1.addVertexRequest("participants"); // <2> + for (Vertex vertex : initialVertices) { + if (vertex.getField().equals("participants")) { + fromPeople.addInclude(vertex.getTerm(), 1f); + } + } + + Hop expandHop2 = expandRequest.createNextHop(null); + VertexRequest newPeople = expandHop2.addVertexRequest("participants"); // <3> + for (Vertex vertex : initialVertices) { + if (vertex.getField().equals("participants")) { + newPeople.addExclude(vertex.getTerm()); + } + } + + GraphExploreResponse expandResponse = client.graph().explore(expandRequest, RequestOptions.DEFAULT); + // end::x-pack-graph-explore-expand + + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index 50cd244c0fa07..f92f01f6bad19 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -20,30 +20,62 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.MachineLearningIT; +import org.elasticsearch.client.MlRestTestStateCleaner; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteJobRequest; +import org.elasticsearch.client.ml.DeleteJobResponse; +import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetJobRequest; +import org.elasticsearch.client.ml.GetJobResponse; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetJobStatsResponse; +import org.elasticsearch.client.ml.GetRecordsRequest; +import org.elasticsearch.client.ml.GetRecordsResponse; +import org.elasticsearch.client.ml.OpenJobRequest; +import org.elasticsearch.client.ml.OpenJobResponse; +import org.elasticsearch.client.ml.PutJobRequest; +import 
org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.job.config.AnalysisConfig; +import org.elasticsearch.client.ml.job.config.DataDescription; +import org.elasticsearch.client.ml.job.config.Detector; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.results.AnomalyRecord; +import org.elasticsearch.client.ml.job.results.Bucket; +import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; -import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; -import org.elasticsearch.protocol.xpack.ml.PutJobRequest; -import org.elasticsearch.protocol.xpack.ml.PutJobResponse; -import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig; -import org.elasticsearch.protocol.xpack.ml.job.config.DataDescription; -import org.elasticsearch.protocol.xpack.ml.job.config.Detector; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.client.ml.FlushJobRequest; +import org.elasticsearch.client.ml.FlushJobResponse; +import org.elasticsearch.client.ml.job.stats.JobStats; +import org.junit.After; +import java.io.IOException; import java.util.Collections; import java.util.Date; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { + @After + public void cleanUp() throws IOException { + new MlRestTestStateCleaner(logger, client()).clearMlMetadata(); + } + public void testCreateJob() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -122,6 +154,113 @@ public void onFailure(Exception e) { } } + public void testGetJob() throws Exception { + RestHighLevelClient client = highLevelClient(); + + String jobId = "get-machine-learning-job1"; + + Job job = MachineLearningIT.buildJob("get-machine-learning-job1"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + Job secondJob = MachineLearningIT.buildJob("get-machine-learning-job2"); + client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT); + + { + //tag::x-pack-ml-get-job-request + GetJobRequest request = new GetJobRequest("get-machine-learning-job1", "get-machine-learning-job*"); //<1> + request.setAllowNoJobs(true); //<2> + //end::x-pack-ml-get-job-request + + //tag::x-pack-ml-get-job-execute + GetJobResponse response = client.machineLearning().getJob(request, RequestOptions.DEFAULT); + long numberOfJobs = response.count(); //<1> + List jobs = response.jobs(); //<2> + //end::x-pack-ml-get-job-execute + + assertEquals(2, response.count()); + assertThat(response.jobs(), hasSize(2)); + assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), + containsInAnyOrder(job.getId(), secondJob.getId())); + } + { + GetJobRequest request = new GetJobRequest("get-machine-learning-job1", "get-machine-learning-job*"); + + // tag::x-pack-ml-get-job-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(GetJobResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-job-listener + + // Replace the 
empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-job-execute-async + client.machineLearning().getJobAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-get-job-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testDeleteJob() throws Exception { + RestHighLevelClient client = highLevelClient(); + + String jobId = "my-first-machine-learning-job"; + + Job job = MachineLearningIT.buildJob(jobId); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + Job secondJob = MachineLearningIT.buildJob("my-second-machine-learning-job"); + client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT); + + { + //tag::x-pack-delete-ml-job-request + DeleteJobRequest deleteJobRequest = new DeleteJobRequest("my-first-machine-learning-job"); + deleteJobRequest.setForce(false); //<1> + DeleteJobResponse deleteJobResponse = client.machineLearning().deleteJob(deleteJobRequest, RequestOptions.DEFAULT); + //end::x-pack-delete-ml-job-request + + //tag::x-pack-delete-ml-job-response + boolean isAcknowledged = deleteJobResponse.isAcknowledged(); //<1> + //end::x-pack-delete-ml-job-response + } + { + //tag::x-pack-delete-ml-job-request-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(DeleteJobResponse deleteJobResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::x-pack-delete-ml-job-request-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + //tag::x-pack-delete-ml-job-request-async + DeleteJobRequest deleteJobRequest = new DeleteJobRequest("my-second-machine-learning-job"); + client.machineLearning().deleteJobAsync(deleteJobRequest, RequestOptions.DEFAULT, listener); // <1> + //end::x-pack-delete-ml-job-request-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testOpenJob() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -143,7 +282,6 @@ public void testOpenJob() throws Exception { //end::x-pack-ml-open-job-execute } - { //tag::x-pack-ml-open-job-listener ActionListener listener = new ActionListener() { @@ -154,7 +292,7 @@ public void onResponse(OpenJobResponse openJobResponse) { @Override public void onFailure(Exception e) { - //<2> + // <2> } }; //end::x-pack-ml-open-job-listener @@ -169,6 +307,370 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + } + + public void testCloseJob() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + Job job = MachineLearningIT.buildJob("closing-my-first-machine-learning-job"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT); + + //tag::x-pack-ml-close-job-request + CloseJobRequest closeJobRequest = new CloseJobRequest("closing-my-first-machine-learning-job", "otherjobs*"); //<1> + closeJobRequest.setForce(false); //<2> + closeJobRequest.setAllowNoJobs(true); //<3> + closeJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); //<4> + //end::x-pack-ml-close-job-request + + //tag::x-pack-ml-close-job-execute + CloseJobResponse closeJobResponse = 
client.machineLearning().closeJob(closeJobRequest, RequestOptions.DEFAULT); + boolean isClosed = closeJobResponse.isClosed(); //<1> + //end::x-pack-ml-close-job-execute + + } + { + Job job = MachineLearningIT.buildJob("closing-my-second-machine-learning-job"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT); + + //tag::x-pack-ml-close-job-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(CloseJobResponse closeJobResponse) { + //<1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::x-pack-ml-close-job-listener + CloseJobRequest closeJobRequest = new CloseJobRequest("closing-my-second-machine-learning-job"); + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-close-job-execute-async + client.machineLearning().closeJobAsync(closeJobRequest, RequestOptions.DEFAULT, listener); //<1> + // end::x-pack-ml-close-job-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testGetBuckets() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + String jobId = "test-get-buckets"; + Job job = MachineLearningIT.buildJob(jobId); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + // Let us index a bucket + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc"); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source("{\"job_id\":\"test-get-buckets\", \"result_type\":\"bucket\", \"timestamp\": 1533081600000," + + "\"bucket_span\": 600,\"is_interim\": false, \"anomaly_score\": 80.0}", XContentType.JSON); + client.index(indexRequest, RequestOptions.DEFAULT); + + { + // tag::x-pack-ml-get-buckets-request + GetBucketsRequest request = new GetBucketsRequest(jobId); // <1> + // end::x-pack-ml-get-buckets-request + + // tag::x-pack-ml-get-buckets-timestamp + request.setTimestamp("2018-08-17T00:00:00Z"); // <1> + // end::x-pack-ml-get-buckets-timestamp + + // Set timestamp to null as it is incompatible with other args + request.setTimestamp(null); + + // tag::x-pack-ml-get-buckets-anomaly-score + request.setAnomalyScore(75.0); // <1> + // end::x-pack-ml-get-buckets-anomaly-score + + // tag::x-pack-ml-get-buckets-desc + request.setDescending(true); // <1> + // end::x-pack-ml-get-buckets-desc + + // tag::x-pack-ml-get-buckets-end + request.setEnd("2018-08-21T00:00:00Z"); // <1> + // end::x-pack-ml-get-buckets-end + + // tag::x-pack-ml-get-buckets-exclude-interim + request.setExcludeInterim(true); // <1> + // end::x-pack-ml-get-buckets-exclude-interim + + // tag::x-pack-ml-get-buckets-expand + request.setExpand(true); // <1> + // end::x-pack-ml-get-buckets-expand + + // tag::x-pack-ml-get-buckets-page + request.setPageParams(new PageParams(100, 200)); // <1> + // end::x-pack-ml-get-buckets-page + + // Set page params back to null so the response contains the bucket we indexed + request.setPageParams(null); + // tag::x-pack-ml-get-buckets-sort + request.setSort("anomaly_score"); // <1> + // end::x-pack-ml-get-buckets-sort + + // tag::x-pack-ml-get-buckets-start + request.setStart("2018-08-01T00:00:00Z"); // <1> + // end::x-pack-ml-get-buckets-start + + // tag::x-pack-ml-get-buckets-execute + 
GetBucketsResponse response = client.machineLearning().getBuckets(request, RequestOptions.DEFAULT);
+            // end::x-pack-ml-get-buckets-execute
+
+            // tag::x-pack-ml-get-buckets-response
+            long count = response.count(); // <1>
+            List<Bucket> buckets = response.buckets(); // <2>
+            // end::x-pack-ml-get-buckets-response
+            assertEquals(1, buckets.size());
+        }
+        {
+            GetBucketsRequest request = new GetBucketsRequest(jobId);
+
+            // tag::x-pack-ml-get-buckets-listener
+            ActionListener<GetBucketsResponse> listener =
+                    new ActionListener<GetBucketsResponse>() {
+                        @Override
+                        public void onResponse(GetBucketsResponse getBucketsResponse) {
+                            // <1>
+                        }
+
+                        @Override
+                        public void onFailure(Exception e) {
+                            // <2>
+                        }
+                    };
+            // end::x-pack-ml-get-buckets-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::x-pack-ml-get-buckets-execute-async
+            client.machineLearning().getBucketsAsync(request, RequestOptions.DEFAULT, listener); // <1>
+            // end::x-pack-ml-get-buckets-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+
+    public void testFlushJob() throws Exception {
+        RestHighLevelClient client = highLevelClient();
+
+        Job job = MachineLearningIT.buildJob("flushing-my-first-machine-learning-job");
+        client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
+        client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT);
+
+        Job secondJob = MachineLearningIT.buildJob("flushing-my-second-machine-learning-job");
+        client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT);
+        client.machineLearning().openJob(new OpenJobRequest(secondJob.getId()), RequestOptions.DEFAULT);
+
+        {
+            //tag::x-pack-ml-flush-job-request
+            FlushJobRequest flushJobRequest = new FlushJobRequest("flushing-my-first-machine-learning-job"); //<1>
+            //end::x-pack-ml-flush-job-request
+
+            //tag::x-pack-ml-flush-job-request-options
+            flushJobRequest.setCalcInterim(true); //<1>
+            flushJobRequest.setAdvanceTime("2018-08-31T16:35:07+00:00"); //<2>
+            flushJobRequest.setStart("2018-08-31T16:35:17+00:00"); //<3>
+            flushJobRequest.setEnd("2018-08-31T16:35:27+00:00"); //<4>
+            flushJobRequest.setSkipTime("2018-08-31T16:35:00+00:00"); //<5>
+            //end::x-pack-ml-flush-job-request-options
+
+            //tag::x-pack-ml-flush-job-execute
+            FlushJobResponse flushJobResponse = client.machineLearning().flushJob(flushJobRequest, RequestOptions.DEFAULT);
+            //end::x-pack-ml-flush-job-execute
+
+            //tag::x-pack-ml-flush-job-response
+            boolean isFlushed = flushJobResponse.isFlushed(); //<1>
+            Date lastFinalizedBucketEnd = flushJobResponse.getLastFinalizedBucketEnd(); //<2>
+            //end::x-pack-ml-flush-job-response
+
+        }
+        {
+            //tag::x-pack-ml-flush-job-listener
+            ActionListener<FlushJobResponse> listener = new ActionListener<FlushJobResponse>() {
+                @Override
+                public void onResponse(FlushJobResponse flushJobResponse) {
+                    //<1>
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            };
+            //end::x-pack-ml-flush-job-listener
+            FlushJobRequest flushJobRequest = new FlushJobRequest("flushing-my-second-machine-learning-job");
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::x-pack-ml-flush-job-execute-async
+            client.machineLearning().flushJobAsync(flushJobRequest, RequestOptions.DEFAULT, listener); //<1>
+            // end::x-pack-ml-flush-job-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+
+
+    public void testGetJobStats() throws Exception {
+        RestHighLevelClient client = highLevelClient();
+
+        Job job = MachineLearningIT.buildJob("get-machine-learning-job-stats1");
+        client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
+
+        Job secondJob = MachineLearningIT.buildJob("get-machine-learning-job-stats2");
+        client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT);
+
+        {
+            //tag::x-pack-ml-get-job-stats-request
+            GetJobStatsRequest request = new GetJobStatsRequest("get-machine-learning-job-stats1", "get-machine-learning-job-*"); //<1>
+            request.setAllowNoJobs(true); //<2>
+            //end::x-pack-ml-get-job-stats-request
+
+            //tag::x-pack-ml-get-job-stats-execute
+            GetJobStatsResponse response = client.machineLearning().getJobStats(request, RequestOptions.DEFAULT);
+            //end::x-pack-ml-get-job-stats-execute
+
+            //tag::x-pack-ml-get-job-stats-response
+            long numberOfJobStats = response.count(); //<1>
+            List<JobStats> jobStats = response.jobStats(); //<2>
+            //end::x-pack-ml-get-job-stats-response
+
+            assertEquals(2, response.count());
+            assertThat(response.jobStats(), hasSize(2));
+            assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()),
+                    containsInAnyOrder(job.getId(), secondJob.getId()));
+        }
+        {
+            GetJobStatsRequest request = new GetJobStatsRequest("get-machine-learning-job-stats1", "get-machine-learning-job-*");
+
+            // tag::x-pack-ml-get-job-stats-listener
+            ActionListener<GetJobStatsResponse> listener = new ActionListener<GetJobStatsResponse>() {
+                @Override
+                public void onResponse(GetJobStatsResponse response) {
+                    // <1>
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            };
+            // end::x-pack-ml-get-job-stats-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::x-pack-ml-get-job-stats-execute-async
+            client.machineLearning().getJobStatsAsync(request, RequestOptions.DEFAULT, listener); // <1>
+            // end::x-pack-ml-get-job-stats-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+
+    public void testGetRecords() throws IOException, InterruptedException {
+        RestHighLevelClient client = highLevelClient();
+
+        String jobId = "test-get-records";
+        Job job = MachineLearningIT.buildJob(jobId);
+        client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
+
+        // Let us index a record
+        IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc");
+        indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+        indexRequest.source("{\"job_id\":\"test-get-records\", \"result_type\":\"record\", \"timestamp\": 1533081600000," +
+                "\"bucket_span\": 600,\"is_interim\": false, \"record_score\": 80.0}", XContentType.JSON);
+        client.index(indexRequest, RequestOptions.DEFAULT);
+
+        {
+            // tag::x-pack-ml-get-records-request
+            GetRecordsRequest request = new GetRecordsRequest(jobId); // <1>
+            // end::x-pack-ml-get-records-request
+
+            // tag::x-pack-ml-get-records-desc
+            request.setDescending(true); // <1>
+            // end::x-pack-ml-get-records-desc
+
+            // tag::x-pack-ml-get-records-end
+            request.setEnd("2018-08-21T00:00:00Z"); // <1>
+            // end::x-pack-ml-get-records-end
+
+            // tag::x-pack-ml-get-records-exclude-interim
+            request.setExcludeInterim(true); // <1>
+            // end::x-pack-ml-get-records-exclude-interim
+
+            // tag::x-pack-ml-get-records-page
+            request.setPageParams(new PageParams(100, 
200)); // <1> + // end::x-pack-ml-get-records-page + + // Set page params back to null so the response contains the record we indexed + request.setPageParams(null); + + // tag::x-pack-ml-get-records-record-score + request.setRecordScore(75.0); // <1> + // end::x-pack-ml-get-records-record-score + + // tag::x-pack-ml-get-records-sort + request.setSort("probability"); // <1> + // end::x-pack-ml-get-records-sort + + // tag::x-pack-ml-get-records-start + request.setStart("2018-08-01T00:00:00Z"); // <1> + // end::x-pack-ml-get-records-start + + // tag::x-pack-ml-get-records-execute + GetRecordsResponse response = client.machineLearning().getRecords(request, RequestOptions.DEFAULT); + // end::x-pack-ml-get-records-execute + + // tag::x-pack-ml-get-records-response + long count = response.count(); // <1> + List records = response.records(); // <2> + // end::x-pack-ml-get-records-response + assertEquals(1, records.size()); + } + { + GetRecordsRequest request = new GetRecordsRequest(jobId); + + // tag::x-pack-ml-get-records-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(GetRecordsResponse getRecordsResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-records-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-records-execute-async + client.machineLearning().getRecordsAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-get-records-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java index b1374ca85b6a9..fc38090ef5b5b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java @@ -17,8 +17,6 @@ * under the License. 
*/ -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; @@ -27,6 +25,7 @@ import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; @@ -43,7 +42,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -193,11 +191,9 @@ private void putStoredScript(String id, StoredScriptSource scriptSource) throws final String script = Strings.toString(scriptSource.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS)); // TODO: change to HighLevel PutStoredScriptRequest when it will be ready // so far - using low-level REST API - Response putResponse = - adminClient() - .performRequest("PUT", "/_scripts/" + id, emptyMap(), - new StringEntity("{\"script\":" + script + "}", - ContentType.APPLICATION_JSON)); + Request request = new Request("PUT", "/_scripts/" + id); + request.setJsonEntity("{\"script\":" + script + "}"); + Response putResponse = adminClient().performRequest(request); assertEquals(putResponse.getStatusLine().getReasonPhrase(), 200, putResponse.getStatusLine().getStatusCode()); assertEquals("{\"acknowledged\":true}", EntityUtils.toString(putResponse.getEntity())); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/CloseJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/CloseJobRequestTests.java new file mode 100644 index 0000000000000..cf5f5ca3c0fb8 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/CloseJobRequestTests.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class CloseJobRequestTests extends AbstractXContentTestCase { + + public void testCloseAllJobsRequest() { + CloseJobRequest request = CloseJobRequest.closeAllJobsRequest(); + assertEquals(request.getJobIds().size(), 1); + assertEquals(request.getJobIds().get(0), "_all"); + } + + public void testWithNullJobIds() { + Exception exception = expectThrows(IllegalArgumentException.class, CloseJobRequest::new); + assertEquals(exception.getMessage(), "jobIds must not be empty"); + + exception = expectThrows(NullPointerException.class, () -> new CloseJobRequest("job1", null)); + assertEquals(exception.getMessage(), "jobIds must not contain null values"); + } + + + @Override + protected CloseJobRequest createTestInstance() { + int jobCount = randomIntBetween(1, 10); + List jobIds = new ArrayList<>(jobCount); + + for (int i = 0; i < jobCount; i++) { + jobIds.add(randomAlphaOfLength(10)); + } + + CloseJobRequest request = new CloseJobRequest(jobIds.toArray(new String[0])); + + if (randomBoolean()) { + request.setAllowNoJobs(randomBoolean()); + } + + if (randomBoolean()) { + request.setTimeout(TimeValue.timeValueMinutes(randomIntBetween(1, 10))); + } + + if (randomBoolean()) { + request.setForce(randomBoolean()); + } + + return request; + } + + @Override + protected CloseJobRequest doParseInstance(XContentParser parser) throws IOException { + return CloseJobRequest.PARSER.parse(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/CloseJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/CloseJobResponseTests.java new file mode 100644 index 0000000000000..04389a3af7e14 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/CloseJobResponseTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class CloseJobResponseTests extends AbstractXContentTestCase { + + @Override + protected CloseJobResponse createTestInstance() { + return new CloseJobResponse(randomBoolean()); + } + + @Override + protected CloseJobResponse doParseInstance(XContentParser parser) throws IOException { + return CloseJobResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteJobRequestTests.java new file mode 100644 index 0000000000000..d3ccb98eeb68a --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteJobRequestTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.config.JobTests; +import org.elasticsearch.test.ESTestCase; + +public class DeleteJobRequestTests extends ESTestCase { + + private DeleteJobRequest createTestInstance() { + return new DeleteJobRequest(JobTests.randomValidJobId()); + } + + public void test_WithNullJobId() { + NullPointerException ex = expectThrows(NullPointerException.class, () -> new DeleteJobRequest(null)); + assertEquals("[job_id] must not be null", ex.getMessage()); + + ex = expectThrows(NullPointerException.class, () -> createTestInstance().setJobId(null)); + assertEquals("[job_id] must not be null", ex.getMessage()); + } + + public void test_WithForce() { + DeleteJobRequest deleteJobRequest = createTestInstance(); + assertFalse(deleteJobRequest.isForce()); + + deleteJobRequest.setForce(true); + assertTrue(deleteJobRequest.isForce()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteJobResponseTests.java new file mode 100644 index 0000000000000..2eb4d51e19180 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteJobResponseTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class DeleteJobResponseTests extends AbstractXContentTestCase { + + @Override + protected DeleteJobResponse createTestInstance() { + return new DeleteJobResponse(); + } + + @Override + protected DeleteJobResponse doParseInstance(XContentParser parser) throws IOException { + return DeleteJobResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobRequestTests.java new file mode 100644 index 0000000000000..c2bddd436ccd5 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobRequestTests.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class FlushJobRequestTests extends AbstractXContentTestCase { + + @Override + protected FlushJobRequest createTestInstance() { + FlushJobRequest request = new FlushJobRequest(randomAlphaOfLengthBetween(1, 20)); + + if (randomBoolean()) { + request.setCalcInterim(randomBoolean()); + } + if (randomBoolean()) { + request.setAdvanceTime(String.valueOf(randomLong())); + } + if (randomBoolean()) { + request.setStart(String.valueOf(randomLong())); + } + if (randomBoolean()) { + request.setEnd(String.valueOf(randomLong())); + } + if (randomBoolean()) { + request.setSkipTime(String.valueOf(randomLong())); + } + return request; + } + + @Override + protected FlushJobRequest doParseInstance(XContentParser parser) throws IOException { + return FlushJobRequest.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobResponseTests.java new file mode 100644 index 0000000000000..bc968ff4564ab --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobResponseTests.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.Date; + +public class FlushJobResponseTests extends AbstractXContentTestCase { + + @Override + protected FlushJobResponse createTestInstance() { + return new FlushJobResponse(randomBoolean(), + randomBoolean() ? null : new Date(randomNonNegativeLong())); + } + + @Override + protected FlushJobResponse doParseInstance(XContentParser parser) throws IOException { + return FlushJobResponse.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsRequestTests.java new file mode 100644 index 0000000000000..d637988691226 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsRequestTests.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class GetBucketsRequestTests extends AbstractXContentTestCase { + + @Override + protected GetBucketsRequest createTestInstance() { + GetBucketsRequest request = new GetBucketsRequest(randomAlphaOfLengthBetween(1, 20)); + + if (randomBoolean()) { + request.setTimestamp(String.valueOf(randomLong())); + } else { + if (randomBoolean()) { + request.setStart(String.valueOf(randomLong())); + } + if (randomBoolean()) { + request.setEnd(String.valueOf(randomLong())); + } + if (randomBoolean()) { + request.setExcludeInterim(randomBoolean()); + } + if (randomBoolean()) { + request.setAnomalyScore(randomDouble()); + } + if (randomBoolean()) { + int from = randomInt(10000); + int size = randomInt(10000); + request.setPageParams(new PageParams(from, size)); + } + if (randomBoolean()) { + request.setSort("anomaly_score"); + } + if (randomBoolean()) { + request.setDescending(randomBoolean()); + } + } + if (randomBoolean()) { + request.setExpand(randomBoolean()); + } + if (randomBoolean()) { + request.setExcludeInterim(randomBoolean()); + } + return request; + } + + @Override + protected GetBucketsRequest doParseInstance(XContentParser parser) throws IOException { + return GetBucketsRequest.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsResponseTests.java new file mode 100644 index 0000000000000..7b1934c2dfacb --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsResponseTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.results.Bucket; +import org.elasticsearch.client.ml.job.results.BucketTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class GetBucketsResponseTests extends AbstractXContentTestCase { + + @Override + protected GetBucketsResponse createTestInstance() { + String jobId = randomAlphaOfLength(20); + int listSize = randomInt(10); + List buckets = new ArrayList<>(listSize); + for (int j = 0; j < listSize; j++) { + Bucket bucket = BucketTests.createTestInstance(jobId); + buckets.add(bucket); + } + return new GetBucketsResponse(buckets, listSize); + } + + @Override + protected GetBucketsResponse doParseInstance(XContentParser parser) throws IOException { + return GetBucketsResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobRequestTests.java new file mode 100644 index 0000000000000..77b2109dd7c68 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobRequestTests.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class GetJobRequestTests extends AbstractXContentTestCase { + + public void testAllJobsRequest() { + GetJobRequest request = GetJobRequest.getAllJobsRequest(); + + assertEquals(request.getJobIds().size(), 1); + assertEquals(request.getJobIds().get(0), "_all"); + } + + public void testNewWithJobId() { + Exception exception = expectThrows(NullPointerException.class, () -> new GetJobRequest("job",null)); + assertEquals(exception.getMessage(), "jobIds must not contain null values"); + } + + @Override + protected GetJobRequest createTestInstance() { + int jobCount = randomIntBetween(0, 10); + List jobIds = new ArrayList<>(jobCount); + + for (int i = 0; i < jobCount; i++) { + jobIds.add(randomAlphaOfLength(10)); + } + + GetJobRequest request = new GetJobRequest(jobIds); + + if (randomBoolean()) { + request.setAllowNoJobs(randomBoolean()); + } + + return request; + } + + @Override + protected GetJobRequest doParseInstance(XContentParser parser) throws IOException { + return GetJobRequest.PARSER.parse(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobResponseTests.java new file mode 100644 index 0000000000000..8cc990730f78e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobResponseTests.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.config.JobTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Predicate; + +public class GetJobResponseTests extends AbstractXContentTestCase { + + @Override + protected GetJobResponse createTestInstance() { + + int count = randomIntBetween(1, 5); + List results = new ArrayList<>(count); + for(int i = 0; i < count; i++) { + results.add(JobTests.createRandomizedJobBuilder()); + } + + return new GetJobResponse(results, count); + } + + @Override + protected GetJobResponse doParseInstance(XContentParser parser) throws IOException { + return GetJobResponse.fromXContent(parser); + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return field -> !field.isEmpty(); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsRequestTests.java new file mode 100644 index 0000000000000..690e582976656 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsRequestTests.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class GetJobStatsRequestTests extends AbstractXContentTestCase { + + public void testAllJobsRequest() { + GetJobStatsRequest request = GetJobStatsRequest.getAllJobStatsRequest(); + + assertEquals(request.getJobIds().size(), 1); + assertEquals(request.getJobIds().get(0), "_all"); + } + + public void testNewWithJobId() { + Exception exception = expectThrows(NullPointerException.class, () -> new GetJobStatsRequest("job", null)); + assertEquals(exception.getMessage(), "jobIds must not contain null values"); + } + + @Override + protected GetJobStatsRequest createTestInstance() { + int jobCount = randomIntBetween(0, 10); + List jobIds = new ArrayList<>(jobCount); + + for (int i = 0; i < jobCount; i++) { + jobIds.add(randomAlphaOfLength(10)); + } + + GetJobStatsRequest request = new GetJobStatsRequest(jobIds); + + if (randomBoolean()) { + request.setAllowNoJobs(randomBoolean()); + } + + return request; + } + + @Override + protected GetJobStatsRequest doParseInstance(XContentParser parser) throws IOException { + return GetJobStatsRequest.PARSER.parse(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsResponseTests.java new file mode 100644 index 0000000000000..23f7bcc042b4a --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsResponseTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.client.ml.job.stats.JobStats; +import org.elasticsearch.client.ml.job.stats.JobStatsTests; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class GetJobStatsResponseTests extends AbstractXContentTestCase { + + @Override + protected GetJobStatsResponse createTestInstance() { + + int count = randomIntBetween(1, 5); + List results = new ArrayList<>(count); + for(int i = 0; i < count; i++) { + results.add(JobStatsTests.createRandomInstance()); + } + + return new GetJobStatsResponse(results, count); + } + + @Override + protected GetJobStatsResponse doParseInstance(XContentParser parser) throws IOException { + return GetJobStatsResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsRequestTests.java new file mode 100644 index 0000000000000..226ffe75b01e6 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsRequestTests.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class GetRecordsRequestTests extends AbstractXContentTestCase { + + @Override + protected GetRecordsRequest createTestInstance() { + GetRecordsRequest request = new GetRecordsRequest(ESTestCase.randomAlphaOfLengthBetween(1, 20)); + + if (ESTestCase.randomBoolean()) { + request.setStart(String.valueOf(ESTestCase.randomLong())); + } + if (ESTestCase.randomBoolean()) { + request.setEnd(String.valueOf(ESTestCase.randomLong())); + } + if (ESTestCase.randomBoolean()) { + request.setExcludeInterim(ESTestCase.randomBoolean()); + } + if (ESTestCase.randomBoolean()) { + request.setRecordScore(ESTestCase.randomDouble()); + } + if (ESTestCase.randomBoolean()) { + int from = ESTestCase.randomInt(10000); + int size = ESTestCase.randomInt(10000); + request.setPageParams(new PageParams(from, size)); + } + if (ESTestCase.randomBoolean()) { + request.setSort("anomaly_score"); + } + if (ESTestCase.randomBoolean()) { + request.setDescending(ESTestCase.randomBoolean()); + } + if (ESTestCase.randomBoolean()) { + request.setExcludeInterim(ESTestCase.randomBoolean()); + } + return request; + } + + @Override + protected GetRecordsRequest doParseInstance(XContentParser parser) throws IOException { + return GetRecordsRequest.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsResponseTests.java new file mode 100644 index 0000000000000..be455f716540d --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsResponseTests.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.results.AnomalyRecord; +import org.elasticsearch.client.ml.job.results.AnomalyRecordTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class GetRecordsResponseTests extends AbstractXContentTestCase { + + @Override + protected GetRecordsResponse createTestInstance() { + String jobId = ESTestCase.randomAlphaOfLength(20); + int listSize = ESTestCase.randomInt(10); + List records = new ArrayList<>(listSize); + for (int j = 0; j < listSize; j++) { + AnomalyRecord record = AnomalyRecordTests.createTestInstance(jobId); + records.add(record); + } + return new GetRecordsResponse(records, listSize); + } + + @Override + protected GetRecordsResponse doParseInstance(XContentParser parser) throws IOException { + return GetRecordsResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/NodeAttributesTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/NodeAttributesTests.java new file mode 100644 index 0000000000000..cee1710a62232 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/NodeAttributesTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Predicate; + +public class NodeAttributesTests extends AbstractXContentTestCase { + + public static NodeAttributes createRandom() { + int numberOfAttributes = randomIntBetween(1, 10); + Map attributes = new HashMap<>(numberOfAttributes); + for(int i = 0; i < numberOfAttributes; i++) { + String val = randomAlphaOfLength(10); + attributes.put("key-"+i, val); + } + return new NodeAttributes(randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + attributes); + } + + @Override + protected NodeAttributes createTestInstance() { + return createRandom(); + } + + @Override + protected NodeAttributes doParseInstance(XContentParser parser) throws IOException { + return NodeAttributes.PARSER.parse(parser, null); + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return field -> !field.isEmpty(); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/OpenJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/OpenJobRequestTests.java similarity index 93% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/OpenJobRequestTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/OpenJobRequestTests.java index 242f0cf4e8a5a..c6ce34364462c 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/OpenJobRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/OpenJobRequestTests.java @@ -16,11 +16,11 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; +import org.elasticsearch.client.ml.job.config.JobTests; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.xpack.ml.job.config.JobTests; import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/OpenJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/OpenJobResponseTests.java similarity index 96% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/OpenJobResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/OpenJobResponseTests.java index aadfb236d3a9b..7f177c6e1ef82 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/OpenJobResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/OpenJobResponseTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/PutJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutJobRequestTests.java similarity index 89% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/PutJobRequestTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutJobRequestTests.java index 165934224b905..b58d849de1fbc 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/PutJobRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutJobRequestTests.java @@ -16,11 +16,11 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.config.JobTests; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.xpack.ml.job.config.Job; -import org.elasticsearch.protocol.xpack.ml.job.config.JobTests; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/PutJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutJobResponseTests.java similarity index 92% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/PutJobResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutJobResponseTests.java index ed91e33635b29..1f435783d0f18 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/PutJobResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutJobResponseTests.java @@ -16,10 +16,10 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml; +package org.elasticsearch.client.ml; +import org.elasticsearch.client.ml.job.config.JobTests; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.xpack.ml.job.config.JobTests; import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/ChunkingConfigTests.java similarity index 97% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/ChunkingConfigTests.java index c835788bb1c98..c1c0daaa9384f 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/ChunkingConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/ChunkingConfigTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.datafeed; +package org.elasticsearch.client.ml.datafeed; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java similarity index 99% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java index f45d88d318e01..462eb1466511d 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.datafeed; +package org.elasticsearch.client.ml.datafeed; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdateTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java index edbef8461e053..3dddad3c01676 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/datafeed/DatafeedUpdateTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.datafeed; +package org.elasticsearch.client.ml.datafeed; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java similarity index 99% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java index 34f12fc067e75..7b76688f4d31b 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisLimitsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisLimitsTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisLimitsTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisLimitsTests.java index 5003da10780d4..cb14d19300d99 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisLimitsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisLimitsTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/CategorizationAnalyzerConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/CategorizationAnalyzerConfigTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/CategorizationAnalyzerConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/CategorizationAnalyzerConfigTests.java index 36fb51ed10e72..889926e00d6fb 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/CategorizationAnalyzerConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/CategorizationAnalyzerConfigTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DataDescriptionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DataDescriptionTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DataDescriptionTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DataDescriptionTests.java index 8ca2dc494f3c5..9c1f361ce0e8d 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DataDescriptionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DataDescriptionTests.java @@ -16,12 +16,12 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.xpack.ml.job.config.DataDescription.DataFormat; import org.elasticsearch.test.AbstractXContentTestCase; +import static org.elasticsearch.client.ml.job.config.DataDescription.DataFormat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.Is.is; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRuleTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DetectionRuleTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRuleTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DetectionRuleTests.java index bc70a404894ab..32c6ca426ca05 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRuleTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DetectionRuleTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DetectorTests.java similarity index 99% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DetectorTests.java index 0b1ba892acd30..7801447e724fd 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/DetectorTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRefTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/FilterRefTests.java similarity index 96% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRefTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/FilterRefTests.java index 00862e5307b9e..cdc79b83524a0 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRefTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/FilterRefTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/JobTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/JobTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/JobTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/JobTests.java index 7ba4946efa753..1946f70a230d9 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/JobTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/JobTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; import org.elasticsearch.common.settings.Settings; @@ -210,7 +210,7 @@ public static AnalysisConfig.Builder createAnalysisConfig() { return new AnalysisConfig.Builder(Arrays.asList(d1.build(), d2.build())); } - public static Job createRandomizedJob() { + public static Job.Builder createRandomizedJobBuilder() { String jobId = randomValidJobId(); Job.Builder builder = new Job.Builder(jobId); if (randomBoolean()) { @@ -265,7 +265,11 @@ public static Job createRandomizedJob() { if (randomBoolean()) { builder.setResultsIndexName(randomValidJobId()); } - return builder.build(); + return builder; + } + + public static Job createRandomizedJob() { + return createRandomizedJobBuilder().build(); } @Override diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilterTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/MlFilterTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilterTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/MlFilterTests.java index 6c595e2d6da15..5e218a8dce7df 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilterTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/MlFilterTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/ModelPlotConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/ModelPlotConfigTests.java similarity index 96% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/ModelPlotConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/ModelPlotConfigTests.java index 23f13c732123a..50f1b49f41443 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/ModelPlotConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/ModelPlotConfigTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleConditionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/RuleConditionTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleConditionTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/RuleConditionTests.java index 4348ea194d01b..3386d3fdc52e2 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleConditionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/RuleConditionTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScopeTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/RuleScopeTests.java similarity index 97% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScopeTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/RuleScopeTests.java index ac97e457ac470..2231b913251ed 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScopeTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/RuleScopeTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.config; +package org.elasticsearch.client.ml.job.config; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/DataCountsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/DataCountsTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/DataCountsTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/DataCountsTests.java index 2232e8c88d924..7c261e8d4c915 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/DataCountsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/DataCountsTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; @@ -113,7 +113,7 @@ private void assertAllFieldsGreaterThanZero(DataCounts stats) throws Exception { private static DataCounts createCounts( long processedRecordCount, long processedFieldCount, long inputBytes, long inputFieldCount, - long invalidDateCount, long missingFieldCount, long outOfOrderTimeStampCount, + long invalidDateCount, long missingFieldCount, long outOfOrderTimeStampCount, long emptyBucketCount, long sparseBucketCount, long bucketCount, long earliestRecordTime, long latestRecordTime, long lastDataTimeStamp, long latestEmptyBucketTimeStamp, long latestSparseBucketTimeStamp) { diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSizeStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java similarity index 96% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSizeStatsTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java index e3341123fb007..4a12a75f2b17d 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSizeStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java @@ -16,15 +16,16 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; -import org.elasticsearch.protocol.xpack.ml.job.process.ModelSizeStats.MemoryStatus; import java.util.Date; +import static org.elasticsearch.client.ml.job.process.ModelSizeStats.MemoryStatus; + public class ModelSizeStatsTests extends AbstractXContentTestCase { public void testDefaultConstructor() { diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshotTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSnapshotTests.java similarity index 99% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshotTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSnapshotTests.java index 8c6a9bd83c915..9669f9bfa4f4e 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshotTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSnapshotTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; import org.elasticsearch.Version; import org.elasticsearch.common.unit.TimeValue; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/QuantilesTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/QuantilesTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/QuantilesTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/QuantilesTests.java index 77ae21bc6f89a..24c70f6d68f66 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/process/QuantilesTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/QuantilesTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.process; +package org.elasticsearch.client.ml.job.process; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyCauseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/AnomalyCauseTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyCauseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/AnomalyCauseTests.java index 070b9f18b4dc4..3ac6a0b6ec4db 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyCauseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/AnomalyCauseTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyRecordTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/AnomalyRecordTests.java similarity index 96% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyRecordTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/AnomalyRecordTests.java index d4cadb19796b5..a857cd3d9b10f 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/AnomalyRecordTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/AnomalyRecordTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; @@ -33,7 +33,7 @@ protected AnomalyRecord createTestInstance() { return createTestInstance("foo"); } - public AnomalyRecord createTestInstance(String jobId) { + public static AnomalyRecord createTestInstance(String jobId) { AnomalyRecord anomalyRecord = new AnomalyRecord(jobId, new Date(randomNonNegativeLong()), randomNonNegativeLong()); anomalyRecord.setActual(Collections.singletonList(randomDouble())); anomalyRecord.setTypical(Collections.singletonList(randomDouble())); diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketInfluencerTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/BucketInfluencerTests.java similarity index 99% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketInfluencerTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/BucketInfluencerTests.java index 7e4c166d1fd67..7b8ba13839849 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketInfluencerTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/BucketInfluencerTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/BucketTests.java similarity index 97% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/BucketTests.java index 28b1893afe18b..b9fac88faccc0 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/BucketTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; @@ -35,7 +35,7 @@ public Bucket createTestInstance() { return createTestInstance("foo"); } - public Bucket createTestInstance(String jobId) { + public static Bucket createTestInstance(String jobId) { Bucket bucket = new Bucket(jobId, new Date(randomNonNegativeLong()), randomNonNegativeLong()); if (randomBoolean()) { bucket.setAnomalyScore(randomDouble()); @@ -70,7 +70,7 @@ public Bucket createTestInstance(String jobId) { int size = randomInt(10); List records = new ArrayList<>(size); for (int i = 0; i < size; i++) { - AnomalyRecord anomalyRecord = new AnomalyRecordTests().createTestInstance(jobId); + AnomalyRecord anomalyRecord = AnomalyRecordTests.createTestInstance(jobId); records.add(anomalyRecord); } bucket.setRecords(records); diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/CategoryDefinitionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/CategoryDefinitionTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/CategoryDefinitionTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/CategoryDefinitionTests.java index 28ef4a5ecb241..27e15a1600d38 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/CategoryDefinitionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/CategoryDefinitionTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/InfluenceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/InfluenceTests.java similarity index 96% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/InfluenceTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/InfluenceTests.java index b029997d015fa..89b2e5dbcbb6d 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/InfluenceTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/InfluenceTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/InfluencerTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/InfluencerTests.java similarity index 97% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/InfluencerTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/InfluencerTests.java index 8125a1a5c725f..ef83af39958de 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/InfluencerTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/InfluencerTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/OverallBucketTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/OverallBucketTests.java similarity index 97% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/OverallBucketTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/OverallBucketTests.java index babd7410d57ce..9ee6a2025b692 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/OverallBucketTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/OverallBucketTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.ml.job.results; +package org.elasticsearch.client.ml.job.results; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/ForecastStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/ForecastStatsTests.java new file mode 100644 index 0000000000000..16dfa305479be --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/ForecastStatsTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.job.stats; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Predicate; + +public class ForecastStatsTests extends AbstractXContentTestCase { + + @Override + public ForecastStats createTestInstance() { + if (randomBoolean()) { + return createRandom(1, 22); + } + return new ForecastStats(0, null,null,null,null); + } + + @Override + protected ForecastStats doParseInstance(XContentParser parser) throws IOException { + return ForecastStats.PARSER.parse(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return field -> !field.isEmpty(); + } + + public static ForecastStats createRandom(long minTotal, long maxTotal) { + return new ForecastStats( + randomLongBetween(minTotal, maxTotal), + SimpleStatsTests.createRandom(), + SimpleStatsTests.createRandom(), + SimpleStatsTests.createRandom(), + createCountStats()); + } + + private static Map createCountStats() { + Map countStats = new HashMap<>(); + for (int i = 0; i < randomInt(10); ++i) { + countStats.put(randomAlphaOfLengthBetween(1, 20), randomLongBetween(1L, 100L)); + } + return countStats; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/JobStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/JobStatsTests.java new file mode 100644 index 0000000000000..5d00f879140e0 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/JobStatsTests.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.job.stats; + +import org.elasticsearch.client.ml.NodeAttributes; +import org.elasticsearch.client.ml.NodeAttributesTests; +import org.elasticsearch.client.ml.job.process.DataCounts; +import org.elasticsearch.client.ml.job.process.DataCountsTests; +import org.elasticsearch.client.ml.job.process.ModelSizeStats; +import org.elasticsearch.client.ml.job.process.ModelSizeStatsTests; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.client.ml.job.config.JobState; +import org.elasticsearch.client.ml.job.config.JobTests; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.function.Predicate; + + +public class JobStatsTests extends AbstractXContentTestCase { + + public static JobStats createRandomInstance() { + String jobId = JobTests.randomValidJobId(); + JobState state = randomFrom(JobState.CLOSING, JobState.CLOSED, JobState.OPENED, JobState.FAILED, JobState.OPENING); + DataCounts dataCounts = DataCountsTests.createTestInstance(jobId); + + ModelSizeStats modelSizeStats = randomBoolean() ? ModelSizeStatsTests.createRandomized() : null; + ForecastStats forecastStats = randomBoolean() ? ForecastStatsTests.createRandom(1, 22) : null; + NodeAttributes nodeAttributes = randomBoolean() ? NodeAttributesTests.createRandom() : null; + String assigmentExplanation = randomBoolean() ? randomAlphaOfLength(10) : null; + TimeValue openTime = randomBoolean() ? TimeValue.timeValueMillis(randomIntBetween(1, 10000)) : null; + + return new JobStats(jobId, dataCounts, state, modelSizeStats, forecastStats, nodeAttributes, assigmentExplanation, openTime); + } + + @Override + protected JobStats createTestInstance() { + return createRandomInstance(); + } + + @Override + protected JobStats doParseInstance(XContentParser parser) throws IOException { + return JobStats.PARSER.parse(parser, null); + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return field -> !field.isEmpty(); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/SimpleStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/SimpleStatsTests.java new file mode 100644 index 0000000000000..eb9e47af9ba28 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/SimpleStatsTests.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.job.stats; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + + +public class SimpleStatsTests extends AbstractXContentTestCase { + + @Override + protected SimpleStats createTestInstance() { + return createRandom(); + } + + @Override + protected SimpleStats doParseInstance(XContentParser parser) throws IOException { + return SimpleStats.PARSER.parse(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + public static SimpleStats createRandom() { + return new SimpleStats(randomDouble(), randomDouble(), randomDouble(), randomDouble()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/util/PageParamsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/util/PageParamsTests.java new file mode 100644 index 0000000000000..f74cedf1437d1 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/util/PageParamsTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.util; + +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +public class PageParamsTests extends AbstractXContentTestCase { + + @Override + protected PageParams doParseInstance(XContentParser parser) { + return PageParams.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected PageParams createTestInstance() { + Integer from = randomBoolean() ? randomInt() : null; + Integer size = randomBoolean() ? randomInt() : null; + return new PageParams(from, size); + } +} diff --git a/client/rest/build.gradle b/client/rest/build.gradle index fc2ab0bc4c05d..273836a31f0cb 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -1,3 +1,5 @@ +import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -16,9 +18,6 @@ * specific language governing permissions and limitations * under the License. 
*/ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -53,10 +52,9 @@ dependencies { testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}" } -forbiddenApisMain { +tasks.withType(ForbiddenApisCliTask) { //client does not depend on server, so only jdk and http signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), - PrecommitTasks.getResource('/forbidden/http-signatures.txt')] + replaceSignatureFiles ('jdk-signatures', 'http-signatures') } forbiddenPatterns { @@ -67,9 +65,6 @@ forbiddenApisTest { //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage bundledSignatures -= 'jdk-non-portable' bundledSignatures += 'jdk-internal' - //client does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), - PrecommitTasks.getResource('/forbidden/http-signatures.txt')] } // JarHell is part of es server, which we don't want to pull in diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 934b952608674..a7afbc8ffbd6d 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -85,7 +85,7 @@ * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later * by calling {@link #setNodes(Collection)}. *
<p>
- * The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When + * The method {@link #performRequest(Request)} allows to send a request to the cluster. When * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and * retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously * failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead nodes that @@ -145,17 +145,6 @@ public static RestClientBuilder builder(HttpHost... hosts) { return new RestClientBuilder(hostsToNodes(hosts)); } - /** - * Replaces the hosts with which the client communicates. - * - * @deprecated prefer {@link #setNodes(Collection)} because it allows you - * to set metadata for use with {@link NodeSelector}s - */ - @Deprecated - public void setHosts(HttpHost... hosts) { - setNodes(hostsToNodes(hosts)); - } - /** * Replaces the nodes with which the client communicates. */ @@ -251,234 +240,6 @@ public void performRequestAsync(Request request, ResponseListener responseListen } } - /** - * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response - * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters - * and request body. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param headers the optional request headers - * @return the response returned by Elasticsearch - * @throws IOException in case of a problem or the connection was aborted - * @throws ClientProtocolException in case of an http protocol error - * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error - * @deprecated prefer {@link #performRequest(Request)} - */ - @Deprecated - public Response performRequest(String method, String endpoint, Header... headers) throws IOException { - Request request = new Request(method, endpoint); - addHeaders(request, headers); - return performRequest(request); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response - * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param params the query_string parameters - * @param headers the optional request headers - * @return the response returned by Elasticsearch - * @throws IOException in case of a problem or the connection was aborted - * @throws ClientProtocolException in case of an http protocol error - * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error - * @deprecated prefer {@link #performRequest(Request)} - */ - @Deprecated - public Response performRequest(String method, String endpoint, Map params, Header... headers) throws IOException { - Request request = new Request(method, endpoint); - addParameters(request, params); - addHeaders(request, headers); - return performRequest(request); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response - * to be returned. 
Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumerFactory, Header...)} - * which doesn't require specifying an {@link HttpAsyncResponseConsumerFactory} instance, - * {@link HttpAsyncResponseConsumerFactory} will be used to create the needed instances of {@link HttpAsyncResponseConsumer}. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param params the query_string parameters - * @param entity the body of the request, null if not applicable - * @param headers the optional request headers - * @return the response returned by Elasticsearch - * @throws IOException in case of a problem or the connection was aborted - * @throws ClientProtocolException in case of an http protocol error - * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error - * @deprecated prefer {@link #performRequest(Request)} - */ - @Deprecated - public Response performRequest(String method, String endpoint, Map params, - HttpEntity entity, Header... headers) throws IOException { - Request request = new Request(method, endpoint); - addParameters(request, params); - request.setEntity(entity); - addHeaders(request, headers); - return performRequest(request); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to. Blocks until the request is completed and returns - * its response or fails by throwing an exception. Selects a host out of the provided ones in a round-robin fashion. Failing hosts - * are marked dead and retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times - * they previously failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead - * nodes that deserve a retry) are retried until one responds or none of them does, in which case an {@link IOException} will be thrown. - * - * This method works by performing an asynchronous call and waiting - * for the result. If the asynchronous call throws an exception we wrap - * it and rethrow it so that the stack trace attached to the exception - * contains the call site. While we attempt to preserve the original - * exception this isn't always possible and likely haven't covered all of - * the cases. You can get the original exception from - * {@link Exception#getCause()}. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param params the query_string parameters - * @param entity the body of the request, null if not applicable - * @param httpAsyncResponseConsumerFactory the {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the response body gets streamed from a non-blocking HTTP - * connection on the client side. - * @param headers the optional request headers - * @return the response returned by Elasticsearch - * @throws IOException in case of a problem or the connection was aborted - * @throws ClientProtocolException in case of an http protocol error - * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error - * @deprecated prefer {@link #performRequest(Request)} - */ - @Deprecated - public Response performRequest(String method, String endpoint, Map params, - HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, - Header... 
headers) throws IOException { - Request request = new Request(method, endpoint); - addParameters(request, params); - request.setEntity(entity); - setOptions(request, httpAsyncResponseConsumerFactory, headers); - return performRequest(request); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead - * the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to - * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without parameters and request body. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails - * @param headers the optional request headers - * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)} - */ - @Deprecated - public void performRequestAsync(String method, String endpoint, ResponseListener responseListener, Header... headers) { - Request request; - try { - request = new Request(method, endpoint); - addHeaders(request, headers); - } catch (Exception e) { - responseListener.onFailure(e); - return; - } - performRequestAsync(request, responseListener); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead - * the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to - * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without request body. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param params the query_string parameters - * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails - * @param headers the optional request headers - * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)} - */ - @Deprecated - public void performRequestAsync(String method, String endpoint, Map params, - ResponseListener responseListener, Header... headers) { - Request request; - try { - request = new Request(method, endpoint); - addParameters(request, params); - addHeaders(request, headers); - } catch (Exception e) { - responseListener.onFailure(e); - return; - } - performRequestAsync(request, responseListener); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead - * the provided {@link ResponseListener} will be notified upon completion or failure. - * Shortcut to {@link #performRequestAsync(String, String, Map, HttpEntity, HttpAsyncResponseConsumerFactory, ResponseListener, - * Header...)} which doesn't require specifying an {@link HttpAsyncResponseConsumerFactory} instance, - * {@link HttpAsyncResponseConsumerFactory} will be used to create the needed instances of {@link HttpAsyncResponseConsumer}. 
- * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param params the query_string parameters - * @param entity the body of the request, null if not applicable - * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails - * @param headers the optional request headers - * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)} - */ - @Deprecated - public void performRequestAsync(String method, String endpoint, Map params, - HttpEntity entity, ResponseListener responseListener, Header... headers) { - Request request; - try { - request = new Request(method, endpoint); - addParameters(request, params); - request.setEntity(entity); - addHeaders(request, headers); - } catch (Exception e) { - responseListener.onFailure(e); - return; - } - performRequestAsync(request, responseListener); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to. The request is executed asynchronously - * and the provided {@link ResponseListener} gets notified upon request completion or failure. - * Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain - * amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures, - * the later they will be retried). In case of failures all of the alive nodes (or dead nodes that deserve a retry) are retried - * until one responds or none of them does, in which case an {@link IOException} will be thrown. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param params the query_string parameters - * @param entity the body of the request, null if not applicable - * @param httpAsyncResponseConsumerFactory the {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the response body gets streamed from a non-blocking HTTP - * connection on the client side. - * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails - * @param headers the optional request headers - * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)} - */ - @Deprecated - public void performRequestAsync(String method, String endpoint, Map params, - HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, - ResponseListener responseListener, Header... headers) { - Request request; - try { - request = new Request(method, endpoint); - addParameters(request, params); - request.setEntity(entity); - setOptions(request, httpAsyncResponseConsumerFactory, headers); - } catch (Exception e) { - responseListener.onFailure(e); - return; - } - performRequestAsync(request, responseListener); - } - void performRequestAsyncNoCatch(Request request, ResponseListener listener) throws IOException { Map requestParams = new HashMap<>(request.getParameters()); //ignore is a special parameter supported by the clients, shouldn't be sent to es @@ -1035,42 +796,4 @@ public void remove() { itr.remove(); } } - - /** - * Add all headers from the provided varargs argument to a {@link Request}. This only exists - * to support methods that exist for backwards compatibility. - */ - @Deprecated - private static void addHeaders(Request request, Header... 
headers) { - setOptions(request, RequestOptions.DEFAULT.getHttpAsyncResponseConsumerFactory(), headers); - } - - /** - * Add all headers from the provided varargs argument to a {@link Request}. This only exists - * to support methods that exist for backwards compatibility. - */ - @Deprecated - private static void setOptions(Request request, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, - Header... headers) { - Objects.requireNonNull(headers, "headers cannot be null"); - RequestOptions.Builder options = request.getOptions().toBuilder(); - for (Header header : headers) { - Objects.requireNonNull(header, "header cannot be null"); - options.addHeader(header.getName(), header.getValue()); - } - options.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory); - request.setOptions(options); - } - - /** - * Add all parameters from a map to a {@link Request}. This only exists - * to support methods that exist for backwards compatibility. - */ - @Deprecated - private static void addParameters(Request request, Map parameters) { - Objects.requireNonNull(parameters, "parameters cannot be null"); - for (Map.Entry entry : parameters.entrySet()) { - request.addParameter(entry.getKey(), entry.getValue()); - } - } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 6b5bb3c98ee5b..fb58f18d42af0 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -45,7 +45,6 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -215,9 +214,15 @@ public void testHeaders() throws IOException { } final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header"); final int statusCode = randomStatusCode(getRandom()); + Request request = new Request(method, "/" + statusCode); + RequestOptions.Builder options = request.getOptions().toBuilder(); + for (Header header : requestHeaders) { + options.addHeader(header.getName(), header.getValue()); + } + request.setOptions(options); Response esResponse; try { - esResponse = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), requestHeaders); + esResponse = restClient.performRequest(request); } catch (ResponseException e) { esResponse = e.getResponse(); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index cb326f4a24c8d..0c589e6a40c8a 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -59,7 +59,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashSet; -import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -69,7 +68,6 @@ import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.elasticsearch.client.RestClientTestUtil.getOkStatusCodes; -import static 
org.elasticsearch.client.RestClientTestUtil.randomHttpMethod; import static org.elasticsearch.client.RestClientTestUtil.randomStatusCode; import static org.elasticsearch.client.SyncResponseListenerTests.assertExceptionStackContainsCallingMethod; import static org.hamcrest.CoreMatchers.equalTo; @@ -192,7 +190,7 @@ public void testInternalHttpRequest() throws Exception { public void testOkStatusCodes() throws IOException { for (String method : getHttpMethods()) { for (int okStatusCode : getOkStatusCodes()) { - Response response = performRequest(method, "/" + okStatusCode); + Response response = restClient.performRequest(new Request(method, "/" + okStatusCode)); assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode)); } } @@ -223,13 +221,11 @@ public void testErrorStatusCodes() throws IOException { //error status codes should cause an exception to be thrown for (int errorStatusCode : getAllErrorStatusCodes()) { try { - Map params; - if (ignoreParam.isEmpty()) { - params = Collections.emptyMap(); - } else { - params = Collections.singletonMap("ignore", ignoreParam); + Request request = new Request(method, "/" + errorStatusCode); + if (false == ignoreParam.isEmpty()) { + request.addParameter("ignore", ignoreParam); } - Response response = performRequest(method, "/" + errorStatusCode, params); + Response response = restClient.performRequest(request); if (expectedIgnores.contains(errorStatusCode)) { //no exception gets thrown although we got an error status code, as it was configured to be ignored assertEquals(errorStatusCode, response.getStatusLine().getStatusCode()); @@ -256,14 +252,14 @@ public void testIOExceptions() { for (String method : getHttpMethods()) { //IOExceptions should be let bubble up try { - performRequest(method, "/coe"); + restClient.performRequest(new Request(method, "/coe")); fail("request should have failed"); } catch(IOException e) { assertThat(e, instanceOf(ConnectTimeoutException.class)); } failureListener.assertCalled(singletonList(node)); try { - performRequest(method, "/soe"); + restClient.performRequest(new Request(method, "/soe")); fail("request should have failed"); } catch(IOException e) { assertThat(e, instanceOf(SocketTimeoutException.class)); @@ -313,48 +309,6 @@ public void testBody() throws IOException { } } - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests}. - */ - @Deprecated - public void tesPerformRequestOldStyleNullHeaders() throws IOException { - String method = randomHttpMethod(getRandom()); - int statusCode = randomStatusCode(getRandom()); - try { - performRequest(method, "/" + statusCode, (Header[])null); - fail("request should have failed"); - } catch(NullPointerException e) { - assertEquals("request headers must not be null", e.getMessage()); - } - try { - performRequest(method, "/" + statusCode, (Header)null); - fail("request should have failed"); - } catch(NullPointerException e) { - assertEquals("request header must not be null", e.getMessage()); - } - } - - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}. 
- */ - @Deprecated - public void testPerformRequestOldStyleWithNullParams() throws IOException { - String method = randomHttpMethod(getRandom()); - int statusCode = randomStatusCode(getRandom()); - try { - restClient.performRequest(method, "/" + statusCode, (Map)null); - fail("request should have failed"); - } catch(NullPointerException e) { - assertEquals("parameters cannot be null", e.getMessage()); - } - try { - restClient.performRequest(method, "/" + statusCode, null, (HttpEntity)null); - fail("request should have failed"); - } catch(NullPointerException e) { - assertEquals("parameters cannot be null", e.getMessage()); - } - } - /** * End to end test for request and response headers. Exercises the mock http client ability to send back * whatever headers it has received. @@ -464,35 +418,4 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { } return expectedRequest; } - - /** - * @deprecated prefer {@link RestClient#performRequest(Request)}. - */ - @Deprecated - private Response performRequest(String method, String endpoint, Header... headers) throws IOException { - return performRequest(method, endpoint, Collections.emptyMap(), headers); - } - - /** - * @deprecated prefer {@link RestClient#performRequest(Request)}. - */ - @Deprecated - private Response performRequest(String method, String endpoint, Map params, Header... headers) throws IOException { - int methodSelector; - if (params.isEmpty()) { - methodSelector = randomIntBetween(0, 2); - } else { - methodSelector = randomIntBetween(1, 2); - } - switch(methodSelector) { - case 0: - return restClient.performRequest(method, endpoint, headers); - case 1: - return restClient.performRequest(method, endpoint, params, headers); - case 2: - return restClient.performRequest(method, endpoint, params, (HttpEntity)null, headers); - default: - throw new UnsupportedOperationException(); - } - } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index ef94b70542f6c..4a037b18404a1 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -42,7 +42,6 @@ import java.util.concurrent.atomic.AtomicInteger; import static java.util.Collections.singletonList; -import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; @@ -90,88 +89,6 @@ public void onFailure(Exception exception) { } } - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithUnsupportedMethod()}. 
- */ - @Deprecated - public void testPerformAsyncOldStyleWithUnsupportedMethod() throws Exception { - final CountDownLatch latch = new CountDownLatch(1); - try (RestClient restClient = createRestClient()) { - restClient.performRequestAsync("unsupported", randomAsciiLettersOfLength(5), new ResponseListener() { - @Override - public void onSuccess(Response response) { - throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); - } - - @Override - public void onFailure(Exception exception) { - try { - assertThat(exception, instanceOf(UnsupportedOperationException.class)); - assertEquals("http method not supported: unsupported", exception.getMessage()); - } finally { - latch.countDown(); - } - } - }); - assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); - } - } - - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}. - */ - @Deprecated - public void testPerformOldStyleAsyncWithNullParams() throws Exception { - final CountDownLatch latch = new CountDownLatch(1); - try (RestClient restClient = createRestClient()) { - restClient.performRequestAsync(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5), null, new ResponseListener() { - @Override - public void onSuccess(Response response) { - throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); - } - - @Override - public void onFailure(Exception exception) { - try { - assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("parameters cannot be null", exception.getMessage()); - } finally { - latch.countDown(); - } - } - }); - assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); - } - } - - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests}. - */ - @Deprecated - public void testPerformOldStyleAsyncWithNullHeaders() throws Exception { - final CountDownLatch latch = new CountDownLatch(1); - try (RestClient restClient = createRestClient()) { - ResponseListener listener = new ResponseListener() { - @Override - public void onSuccess(Response response) { - throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); - } - - @Override - public void onFailure(Exception exception) { - try { - assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("header cannot be null", exception.getMessage()); - } finally { - latch.countDown(); - } - } - }; - restClient.performRequestAsync("GET", randomAsciiLettersOfLength(5), listener, (Header) null); - assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); - } - } - public void testPerformAsyncWithWrongEndpoint() throws Exception { final CountDownLatch latch = new CountDownLatch(1); try (RestClient restClient = createRestClient()) { @@ -195,33 +112,6 @@ public void onFailure(Exception exception) { } } - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithWrongEndpoint()}. 
- */ - @Deprecated - public void testPerformAsyncOldStyleWithWrongEndpoint() throws Exception { - final CountDownLatch latch = new CountDownLatch(1); - try (RestClient restClient = createRestClient()) { - restClient.performRequestAsync("GET", "::http:///", new ResponseListener() { - @Override - public void onSuccess(Response response) { - throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); - } - - @Override - public void onFailure(Exception exception) { - try { - assertThat(exception, instanceOf(IllegalArgumentException.class)); - assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); - } finally { - latch.countDown(); - } - } - }); - assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); - } - } - public void testBuildUriLeavesPathUntouched() { final Map emptyMap = Collections.emptyMap(); { @@ -259,34 +149,6 @@ public void testBuildUriLeavesPathUntouched() { } } - @Deprecated - public void testSetHostsWrongArguments() throws IOException { - try (RestClient restClient = createRestClient()) { - restClient.setHosts((HttpHost[]) null); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null nor empty", e.getMessage()); - } - try (RestClient restClient = createRestClient()) { - restClient.setHosts(); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null nor empty", e.getMessage()); - } - try (RestClient restClient = createRestClient()) { - restClient.setHosts((HttpHost) null); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("host cannot be null", e.getMessage()); - } - try (RestClient restClient = createRestClient()) { - restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("host cannot be null", e.getMessage()); - } - } - public void testSetNodesWrongArguments() throws IOException { try (RestClient restClient = createRestClient()) { restClient.setNodes(null); @@ -348,23 +210,6 @@ public void testSetNodesDuplicatedHosts() throws Exception { } } - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testConstructor()}. - */ - @Deprecated - public void testNullPath() throws IOException { - try (RestClient restClient = createRestClient()) { - for (String method : getHttpMethods()) { - try { - restClient.performRequest(method, null); - fail("path set to null should fail!"); - } catch (NullPointerException e) { - assertEquals("endpoint cannot be null", e.getMessage()); - } - } - } - } - public void testSelectHosts() throws IOException { Node n1 = new Node(new HttpHost("1"), null, null, "1", null, null); Node n2 = new Node(new HttpHost("2"), null, null, "2", null, null); diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index 41146e0b7ec08..6ba69c5713c57 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. 
*/ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -55,7 +52,7 @@ dependencies { forbiddenApisMain { //client does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } forbiddenApisTest { @@ -63,7 +60,7 @@ forbiddenApisTest { bundledSignatures -= 'jdk-non-portable' bundledSignatures += 'jdk-internal' //client does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } dependencyLicenses { diff --git a/client/test/build.gradle b/client/test/build.gradle index cc69a1828dc85..e66d2be57f1ea 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -16,10 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks -import org.gradle.api.JavaVersion - apply plugin: 'elasticsearch.build' targetCompatibility = JavaVersion.VERSION_1_7 @@ -36,7 +32,7 @@ dependencies { forbiddenApisMain { //client does not depend on core, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } forbiddenApisTest { @@ -44,7 +40,7 @@ forbiddenApisTest { bundledSignatures -= 'jdk-non-portable' bundledSignatures += 'jdk-internal' //client does not depend on core, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } // JarHell is part of es server, which we don't want to pull in diff --git a/client/transport/build.gradle b/client/transport/build.gradle index 944a038edd97c..269a37105fb19 100644 --- a/client/transport/build.gradle +++ b/client/transport/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. 
*/ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -47,8 +44,7 @@ dependencyLicenses { forbiddenApisTest { // we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to // be pulled in - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), - PrecommitTasks.getResource('/forbidden/es-all-signatures.txt')] + replaceSignatureFiles 'jdk-signatures', 'es-all-signatures' } namingConventions { diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseTests.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseTests.java index fab809a51bcc2..756d26745b2cb 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseTests.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseTests.java @@ -53,7 +53,7 @@ public void setupIndex() throws IOException { @After public void cleanupIndex() throws IOException { - client().performRequest("DELETE", indexName()); + client().performRequest(new Request("DELETE", indexName())); } private String indexName() { diff --git a/distribution/tools/java-version-checker/build.gradle b/distribution/tools/java-version-checker/build.gradle index ad9b56fec0502..6d18b79d4bddf 100644 --- a/distribution/tools/java-version-checker/build.gradle +++ b/distribution/tools/java-version-checker/build.gradle @@ -1,11 +1,11 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' targetCompatibility = JavaVersion.VERSION_1_7 // java_version_checker do not depend on core so only JDK signatures should be checked -forbiddenApisMain.signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +forbiddenApisMain { + replaceSignatureFiles 'jdk-signatures' +} test.enabled = false namingConventions.enabled = false diff --git a/distribution/tools/launchers/build.gradle b/distribution/tools/launchers/build.gradle index a774691b2eb17..ca1aa6bcac9d6 100644 --- a/distribution/tools/launchers/build.gradle +++ b/distribution/tools/launchers/build.gradle @@ -17,8 +17,9 @@ * under the License. 
*/ -import org.elasticsearch.gradle.precommit.PrecommitTasks -import org.gradle.api.JavaVersion + + +import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask apply plugin: 'elasticsearch.build' @@ -31,10 +32,9 @@ dependencies { archivesBaseName = 'elasticsearch-launchers' -// java_version_checker do not depend on core so only JDK signatures should be checked -List jdkSignatures = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] -forbiddenApisMain.signaturesURLs = jdkSignatures -forbiddenApisTest.signaturesURLs = jdkSignatures +tasks.withType(ForbiddenApisCliTask) { + replaceSignatureFiles 'jdk-signatures' +} namingConventions { testClass = 'org.elasticsearch.tools.launchers.LaunchersTestCase' diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index c47786299bc2f..38be8db42ff6a 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -39,3 +39,9 @@ test { // TODO: find a way to add permissions for the tests in this module systemProperty 'tests.security.manager', 'false' } + +if (project.inFipsJvm) { + // FIPS JVM includes manny classes from bouncycastle which count as jar hell for the third party audit, + // rather than provide a long list of exclusions, disable the check on FIPS. + thirdPartyAudit.enabled = false +} diff --git a/docs/build.gradle b/docs/build.gradle index 029147bba2ff0..c6a7a8d48374b 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -19,6 +19,22 @@ apply plugin: 'elasticsearch.docs-test' +/* List of files that have snippets that will not work until platinum tests can occur ... */ +buildRestTests.expectedUnconvertedCandidates = [ + 'reference/ml/transforms.asciidoc', + 'reference/ml/apis/delete-calendar-event.asciidoc', + 'reference/ml/apis/get-bucket.asciidoc', + 'reference/ml/apis/get-category.asciidoc', + 'reference/ml/apis/get-influencer.asciidoc', + 'reference/ml/apis/get-job-stats.asciidoc', + 'reference/ml/apis/get-overall-buckets.asciidoc', + 'reference/ml/apis/get-record.asciidoc', + 'reference/ml/apis/get-snapshot.asciidoc', + 'reference/ml/apis/post-data.asciidoc', + 'reference/ml/apis/revert-snapshot.asciidoc', + 'reference/ml/apis/update-snapshot.asciidoc', +] + integTestCluster { /* Enable regexes in painless so our tests don't complain about example * snippets that use them. 
*/ @@ -74,6 +90,17 @@ buildRestTests.docs = fileTree(projectDir) { exclude 'build' // Just syntax examples exclude 'README.asciidoc' + // Broken code snippet tests + exclude 'reference/rollup/rollup-getting-started.asciidoc' + exclude 'reference/rollup/apis/rollup-job-config.asciidoc' + exclude 'reference/rollup/apis/rollup-index-caps.asciidoc' + exclude 'reference/rollup/apis/put-job.asciidoc' + exclude 'reference/rollup/apis/stop-job.asciidoc' + exclude 'reference/rollup/apis/start-job.asciidoc' + exclude 'reference/rollup/apis/rollup-search.asciidoc' + exclude 'reference/rollup/apis/delete-job.asciidoc' + exclude 'reference/rollup/apis/get-job.asciidoc' + exclude 'reference/rollup/apis/rollup-caps.asciidoc' } listSnippets.docs = buildRestTests.docs @@ -400,25 +427,25 @@ buildRestTests.setups['stored_scripted_metric_script'] = ''' - do: put_script: id: "my_init_script" - body: { "script": { "lang": "painless", "source": "params._agg.transactions = []" } } + body: { "script": { "lang": "painless", "source": "state.transactions = []" } } - match: { acknowledged: true } - do: put_script: id: "my_map_script" - body: { "script": { "lang": "painless", "source": "params._agg.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)" } } + body: { "script": { "lang": "painless", "source": "state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)" } } - match: { acknowledged: true } - do: put_script: id: "my_combine_script" - body: { "script": { "lang": "painless", "source": "double profit = 0;for (t in params._agg.transactions) { profit += t; } return profit" } } + body: { "script": { "lang": "painless", "source": "double profit = 0;for (t in state.transactions) { profit += t; } return profit" } } - match: { acknowledged: true } - do: put_script: id: "my_reduce_script" - body: { "script": { "lang": "painless", "source": "double profit = 0;for (a in params._aggs) { profit += a; } return profit" } } + body: { "script": { "lang": "painless", "source": "double profit = 0;for (a in states) { profit += a; } return profit" } } - match: { acknowledged: true } ''' @@ -594,3 +621,480 @@ buildRestTests.setups['library'] = ''' {"name": "The Moon is a Harsh Mistress", "author": "Robert A. 
Heinlein", "release_date": "1966-04-01", "page_count": 288} ''' +buildRestTests.setups['sensor_rollup_job'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + - do: + xpack.rollup.put_job: + id: "sensor" + body: > + { + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "*/30 * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] + } +''' +buildRestTests.setups['sensor_started_rollup_job'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + + - do: + bulk: + index: sensor-1 + type: _doc + refresh: true + body: | + {"index":{}} + {"timestamp": 1516729294000, "temperature": 200, "voltage": 5.2, "node": "a"} + {"index":{}} + {"timestamp": 1516642894000, "temperature": 201, "voltage": 5.8, "node": "b"} + {"index":{}} + {"timestamp": 1516556494000, "temperature": 202, "voltage": 5.1, "node": "a"} + {"index":{}} + {"timestamp": 1516470094000, "temperature": 198, "voltage": 5.6, "node": "b"} + {"index":{}} + {"timestamp": 1516383694000, "temperature": 200, "voltage": 4.2, "node": "c"} + {"index":{}} + {"timestamp": 1516297294000, "temperature": 202, "voltage": 4.0, "node": "c"} + + - do: + xpack.rollup.put_job: + id: "sensor" + body: > + { + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "* * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] + } + - do: + xpack.rollup.start_job: + id: "sensor" +''' + +buildRestTests.setups['sensor_index'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + load: + type: double + net_in: + type: long + net_out: + type: long + hostname: + type: keyword + datacenter: + type: keyword +''' + +buildRestTests.setups['sensor_prefab_data'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + - do: + indices.create: + index: sensor_rollup + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + node.terms.value: + type: keyword + temperature.sum.value: + type: double + temperature.max.value: + type: double + temperature.min.value: + type: double + timestamp.date_histogram.time_zone: + type: keyword + timestamp.date_histogram.interval: + type: keyword + timestamp.date_histogram.timestamp: + type: date + timestamp.date_histogram._count: + type: long + 
voltage.avg.value: + type: double + voltage.avg._count: + type: long + _rollup.id: + type: keyword + _rollup.version: + type: long + _meta: + _rollup: + sensor: + cron: "* * * * * ?" + rollup_index: "sensor_rollup" + index_pattern: "sensor-*" + timeout: "20s" + page_size: 1000 + groups: + date_histogram: + delay: "7d" + field: "timestamp" + interval: "1h" + time_zone: "UTC" + terms: + fields: + - "node" + id: sensor + metrics: + - field: "temperature" + metrics: + - min + - max + - sum + - field: "voltage" + metrics: + - avg + + - do: + bulk: + index: sensor_rollup + type: _doc + refresh: true + body: | + {"index":{}} + {"node.terms.value":"b","temperature.sum.value":201.0,"temperature.max.value":201.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":201.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.800000190734863,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516640400000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"c","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516381200000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"a","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.099999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516554000000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"a","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516726800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"b","temperature.sum.value":198.0,"temperature.max.value":198.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":198.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.599999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516467600000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + 
{"node.terms.value":"c","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.0,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516294800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + +''' +buildRestTests.setups['sample_job'] = ''' + - do: + xpack.ml.put_job: + job_id: "sample_job" + body: > + { + "description" : "Very basic job", + "analysis_config" : { + "bucket_span":"10m", + "detectors" :[ + { + "function": "count" + } + ]}, + "data_description" : { + "time_field":"timestamp", + "time_format": "epoch_ms" + } + } +''' +buildRestTests.setups['farequote_index'] = ''' + - do: + indices.create: + index: farequote + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + metric: + properties: + time: + type: date + responsetime: + type: float + airline: + type: keyword + doc_count: + type: integer +''' +buildRestTests.setups['farequote_data'] = buildRestTests.setups['farequote_index'] + ''' + - do: + bulk: + index: farequote + type: metric + refresh: true + body: | + {"index": {"_id":"1"}} + {"airline":"JZA","responsetime":990.4628,"time":"2016-02-07T00:00:00+0000", "doc_count": 5} + {"index": {"_id":"2"}} + {"airline":"JBU","responsetime":877.5927,"time":"2016-02-07T00:00:00+0000", "doc_count": 23} + {"index": {"_id":"3"}} + {"airline":"KLM","responsetime":1355.4812,"time":"2016-02-07T00:00:00+0000", "doc_count": 42} +''' +buildRestTests.setups['farequote_job'] = buildRestTests.setups['farequote_data'] + ''' + - do: + xpack.ml.put_job: + job_id: "farequote" + body: > + { + "analysis_config": { + "bucket_span": "60m", + "detectors": [{ + "function": "mean", + "field_name": "responsetime", + "by_field_name": "airline" + }], + "summary_count_field_name": "doc_count" + }, + "data_description": { + "time_field": "time" + } + } +''' +buildRestTests.setups['farequote_datafeed'] = buildRestTests.setups['farequote_job'] + ''' + - do: + xpack.ml.put_datafeed: + datafeed_id: "datafeed-farequote" + body: > + { + "job_id":"farequote", + "indexes":"farequote" + } +''' +buildRestTests.setups['server_metrics_index'] = ''' + - do: + indices.create: + index: server-metrics + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + metric: + properties: + timestamp: + type: date + total: + type: long +''' +buildRestTests.setups['server_metrics_data'] = buildRestTests.setups['server_metrics_index'] + ''' + - do: + bulk: + index: server-metrics + type: metric + refresh: true + body: | + {"index": {"_id":"1177"}} + {"timestamp":"2017-03-23T13:00:00","total":40476} + {"index": {"_id":"1178"}} + {"timestamp":"2017-03-23T13:00:00","total":15287} + {"index": {"_id":"1179"}} + {"timestamp":"2017-03-23T13:00:00","total":-776} + {"index": {"_id":"1180"}} + {"timestamp":"2017-03-23T13:00:00","total":11366} + {"index": {"_id":"1181"}} + {"timestamp":"2017-03-23T13:00:00","total":3606} + {"index": {"_id":"1182"}} + {"timestamp":"2017-03-23T13:00:00","total":19006} + {"index": {"_id":"1183"}} + {"timestamp":"2017-03-23T13:00:00","total":38613} + {"index": {"_id":"1184"}} + {"timestamp":"2017-03-23T13:00:00","total":19516} + {"index": {"_id":"1185"}} + {"timestamp":"2017-03-23T13:00:00","total":-258} + {"index": {"_id":"1186"}} + 
{"timestamp":"2017-03-23T13:00:00","total":9551} + {"index": {"_id":"1187"}} + {"timestamp":"2017-03-23T13:00:00","total":11217} + {"index": {"_id":"1188"}} + {"timestamp":"2017-03-23T13:00:00","total":22557} + {"index": {"_id":"1189"}} + {"timestamp":"2017-03-23T13:00:00","total":40508} + {"index": {"_id":"1190"}} + {"timestamp":"2017-03-23T13:00:00","total":11887} + {"index": {"_id":"1191"}} + {"timestamp":"2017-03-23T13:00:00","total":31659} +''' +buildRestTests.setups['server_metrics_job'] = buildRestTests.setups['server_metrics_data'] + ''' + - do: + xpack.ml.put_job: + job_id: "total-requests" + body: > + { + "description" : "Total sum of requests", + "analysis_config" : { + "bucket_span":"10m", + "detectors" :[ + { + "detector_description": "Sum of total", + "function": "sum", + "field_name": "total" + } + ]}, + "data_description" : { + "time_field":"timestamp", + "time_format": "epoch_ms" + } + } +''' +buildRestTests.setups['server_metrics_datafeed'] = buildRestTests.setups['server_metrics_job'] + ''' + - do: + xpack.ml.put_datafeed: + datafeed_id: "datafeed-total-requests" + body: > + { + "job_id":"total-requests", + "indexes":"server-metrics" + } +''' +buildRestTests.setups['server_metrics_openjob'] = buildRestTests.setups['server_metrics_datafeed'] + ''' + - do: + xpack.ml.open_job: + job_id: "total-requests" +''' +buildRestTests.setups['server_metrics_startdf'] = buildRestTests.setups['server_metrics_openjob'] + ''' + - do: + xpack.ml.start_datafeed: + datafeed_id: "datafeed-total-requests" +''' +buildRestTests.setups['calendar_outages'] = ''' + - do: + xpack.ml.put_calendar: + calendar_id: "planned-outages" +''' +buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages'] + ''' + - do: + xpack.ml.post_calendar_events: + calendar_id: "planned-outages" + body: > + { "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "planned-outages" } + + +''' +buildRestTests.setups['calendar_outages_openjob'] = buildRestTests.setups['server_metrics_openjob'] + ''' + - do: + xpack.ml.put_calendar: + calendar_id: "planned-outages" +''' +buildRestTests.setups['calendar_outages_addjob'] = buildRestTests.setups['server_metrics_openjob'] + ''' + - do: + xpack.ml.put_calendar: + calendar_id: "planned-outages" + body: > + { + "job_ids": ["total-requests"] + } +''' +buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages_addjob'] + ''' + - do: + xpack.ml.post_calendar_events: + calendar_id: "planned-outages" + body: > + { "events" : [ + { "description": "event 1", "start_time": "1513641600000", "end_time": "1513728000000"}, + { "description": "event 2", "start_time": "1513814400000", "end_time": "1513900800000"}, + { "description": "event 3", "start_time": "1514160000000", "end_time": "1514246400000"} + ]} +''' + + diff --git a/docs/community-clients/index.asciidoc b/docs/community-clients/index.asciidoc index e28ec84f08790..3266d3e365c53 100644 --- a/docs/community-clients/index.asciidoc +++ b/docs/community-clients/index.asciidoc @@ -132,6 +132,9 @@ The following project appears to be abandoned: * https://github.com/mbuhot/eskotlin[ES Kotlin]: Elasticsearch Query DSL for kotlin based on the {client}/java-api/current/index.html[official Elasticsearch Java client]. 
+ +* https://github.com/jillesvangurp/es-kotlin-wrapper-client[ES Kotlin Wrapper Client]: + Kotlin extension functions and abstractions for the {client}/java-api/current/index.html[official Elasticsearch Highlevel Client]. Aims to reduce the amount of boilerplate needed to do searches, bulk indexing and other common things users do with the client. [[lua]] == Lua diff --git a/docs/java-rest/high-level/document/reindex.asciidoc b/docs/java-rest/high-level/document/reindex.asciidoc new file mode 100644 index 0000000000000..b6d98b42dc509 --- /dev/null +++ b/docs/java-rest/high-level/document/reindex.asciidoc @@ -0,0 +1,215 @@ +[[java-rest-high-document-reindex]] +=== Reindex API + +[[java-rest-high-document-reindex-request]] +==== Reindex Request + +A `ReindexRequest` can be used to copy documents from one or more indexes into a destination index. + +It requires an existing source index and a target index which may or may not exist pre-request. Reindex does not attempt +to set up the destination index. It does not copy the settings of the source index. You should set up the destination +index prior to running a `_reindex` action, including setting up mappings, shard counts, replicas, etc. + +The simplest form of a `ReindexRequest` looks like this: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request] +-------------------------------------------------- +<1> Creates the `ReindexRequest` +<2> Adds a list of sources to copy from +<3> Adds the destination index + +The `dest` element can be configured like the index API to control optimistic concurrency control. Just leaving out +`versionType` (as above) or setting it to internal will cause Elasticsearch to blindly dump documents into the target. +Setting `versionType` to external will cause Elasticsearch to preserve the version from the source, create any documents +that are missing, and update any documents that have an older version in the destination index than they do in the +source index. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-versionType] +-------------------------------------------------- +<1> Set the versionType to `EXTERNAL` + +Setting `opType` to `create` will cause `_reindex` to only create missing documents in the target index. All existing +documents will cause a version conflict. The default `opType` is `index`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-opType] +-------------------------------------------------- +<1> Set the opType to `create` + +By default version conflicts abort the `_reindex` process, but you can just count them by setting `conflicts` to `proceed` +in the request body: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-conflicts] +-------------------------------------------------- +<1> Set `proceed` on version conflict + +You can limit the documents by adding a type to the source or by adding a query.
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-typeOrQuery] +-------------------------------------------------- +<1> Only copy `doc` type +<2> Only copy documents which have field `user` set to `kimchy` + +It’s also possible to limit the number of processed documents by setting size. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-size] +-------------------------------------------------- +<1> Only copy 10 documents + +By default `_reindex` uses batches of 1000. You can change the batch size with `sourceBatchSize`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-sourceSize] +-------------------------------------------------- +<1> Use batches of 100 documents + +Reindex can also use the ingest feature by specifying a `pipeline`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-pipeline] +-------------------------------------------------- +<1> set pipeline to `my_pipeline` + +If you want a particular set of documents from the source index you’ll need to use sort. If possible, prefer a more +selective query to size and sort. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-sort] +-------------------------------------------------- +<1> add descending sort to`field1` +<2> add ascending sort to `field2` + +`ReindexRequest` also supports a `script` that modifies the document. It allows you to also change the document's +metadata. The following example illustrates that. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-script] +-------------------------------------------------- +<1> `setScript` to increment the `likes` field on all documents with user `kimchy`. + +`ReindexRequest` supports reindexing from a remote Elasticsearch cluster. When using a remote cluster the query should be +specified inside the `RemoteInfo` object and not using `setSourceQuery`. If both the remote info and the source query are +set it results in a validation error during the request. The reason for this is that the remote Elasticsearch may not +understand queries built by the modern query builders. The remote cluster support works all the way back to Elasticsearch +0.90 and the query language has changed since then. When reaching older versions, it is safer to write the query by hand +in JSON. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-remote] +-------------------------------------------------- +<1> set remote elastic cluster + +`ReindexRequest` also helps in automatically parallelizing using `sliced-scroll` to +slice on `_uid`. Use `setSlices` to specify the number of slices to use. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-slices] +-------------------------------------------------- +<1> set number of slices to use + +`ReindexRequest` uses the `scroll` parameter to control how long it keeps the "search context" alive. +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-scroll] +-------------------------------------------------- +<1> set scroll time + + +==== Optional arguments +In addition to the options above the following arguments can optionally be also provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for the reindex request to be performed as a `TimeValue` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-refresh] +-------------------------------------------------- +<1> Refresh index after calling reindex + + +[[java-rest-high-document-reindex-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-execute] +-------------------------------------------------- + +[[java-rest-high-document-reindex-async]] +==== Asynchronous Execution + +The asynchronous execution of a reindex request requires both the `ReindexRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-execute-async] +-------------------------------------------------- +<1> The `ReindexRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `BulkByScrollResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument and contains a list of individual results for each +operation that was executed. Note that one or more operations might have +failed while the others have been successfully executed. +<2> Called when the whole `ReindexRequest` fails. In this case the raised +exception is provided as an argument and no operation has been executed. 
+ +[[java-rest-high-document-reindex-response]] +==== Reindex Response + +The returned `BulkByScrollResponse` contains information about the executed operations and + allows iterating over each result as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-response] +-------------------------------------------------- +<1> Get total time taken +<2> Check if the request timed out +<3> Get total number of docs processed +<4> Number of docs that were updated +<5> Number of docs that were created +<6> Number of docs that were deleted +<7> Number of batches that were executed +<8> Number of skipped docs +<9> Number of version conflicts +<10> Number of times request had to retry bulk index operations +<11> Number of times request had to retry search operations +<12> The total time this request has throttled itself not including the current throttle time if it is currently sleeping +<13> Remaining delay of any current throttle sleep or 0 if not sleeping +<14> Failures during search phase +<15> Failures during bulk index operation diff --git a/docs/java-rest/high-level/document/update-by-query.asciidoc b/docs/java-rest/high-level/document/update-by-query.asciidoc new file mode 100644 index 0000000000000..324385a442b5d --- /dev/null +++ b/docs/java-rest/high-level/document/update-by-query.asciidoc @@ -0,0 +1,181 @@ +[[java-rest-high-document-update-by-query]] +=== Update By Query API + +[[java-rest-high-document-update-by-query-request]] +==== Update By Query Request + +An `UpdateByQueryRequest` can be used to update documents in an index. + +It requires an existing index (or a set of indices) on which the update is to be performed. + +The simplest form of an `UpdateByQueryRequest` looks like this: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request] +-------------------------------------------------- +<1> Creates the `UpdateByQueryRequest` on a set of indices. + +By default version conflicts abort the `UpdateByQueryRequest` process, but you can just count them by setting `conflicts` to +`proceed` in the request body: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-conflicts] +-------------------------------------------------- +<1> Set `proceed` on version conflict + +You can limit the documents by adding a type to the source or by adding a query. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-typeOrQuery] +-------------------------------------------------- +<1> Only update `doc` type +<2> Only update documents which have field `user` set to `kimchy` + +It’s also possible to limit the number of processed documents by setting size. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-size] +-------------------------------------------------- +<1> Only update 10 documents + +By default `UpdateByQueryRequest` uses batches of 1000. You can change the batch size with `setBatchSize`.
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-scrollSize] +-------------------------------------------------- +<1> Use batches of 100 documents + +Update by query can also use the ingest feature by specifying a `pipeline`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-pipeline] +-------------------------------------------------- +<1> set pipeline to `my_pipeline` + +`UpdateByQueryRequest` also supports a `script` that modifies the document. The following example illustrates that. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-script] +-------------------------------------------------- +<1> `setScript` to increment the `likes` field on all documents with user `kimchy`. + +`UpdateByQueryRequest` also helps in automatically parallelizing using `sliced-scroll` to +slice on `_uid`. Use `setSlices` to specify the number of slices to use. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-slices] +-------------------------------------------------- +<1> set number of slices to use + +`UpdateByQueryRequest` uses the `scroll` parameter to control how long it keeps the "search context" alive. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-scroll] +-------------------------------------------------- +<1> set scroll time + +If you provide routing then the routing is copied to the scroll query, limiting the process to the shards that match +that routing value. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-routing] +-------------------------------------------------- +<1> set routing + + +==== Optional arguments +In addition to the options above the following arguments can optionally be also provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for the update by query request to be performed as a `TimeValue` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-refresh] +-------------------------------------------------- +<1> Refresh index after calling update by query + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-indicesOptions] +-------------------------------------------------- +<1> Set indices options + + +[[java-rest-high-document-update-by-query-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute] +-------------------------------------------------- + +[[java-rest-high-document-update-by-query-async]] +==== Asynchronous Execution + +The asynchronous execution of an update by query request requires both the `UpdateByQueryRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute-async] +-------------------------------------------------- +<1> The `UpdateByQueryRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `BulkByScrollResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument and contains a list of individual results for each +operation that was executed. Note that one or more operations might have +failed while the others have been successfully executed. +<2> Called when the whole `UpdateByQueryRequest` fails. In this case the raised +exception is provided as an argument and no operation has been executed. 
+ +[[java-rest-high-document-update-by-query-execute-listener-response]] +==== Update By Query Response + +The returned `BulkByScrollResponse` contains information about the executed operations and + allows to iterate over each result as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-response] +-------------------------------------------------- +<1> Get total time taken +<2> Check if the request timed out +<3> Get total number of docs processed +<4> Number of docs that were updated +<5> Number of docs that were deleted +<6> Number of batches that were executed +<7> Number of skipped docs +<8> Number of version conflicts +<9> Number of times request had to retry bulk index operations +<10> Number of times request had to retry search operations +<11> The total time this request has throttled itself not including the current throttle time if it is currently sleeping +<12> Remaining delay of any current throttle sleep or 0 if not sleeping +<13> Failures during search phase +<14> Failures during bulk index operation diff --git a/docs/java-rest/high-level/graph/explore.asciidoc b/docs/java-rest/high-level/graph/explore.asciidoc new file mode 100644 index 0000000000000..f2718209f4b90 --- /dev/null +++ b/docs/java-rest/high-level/graph/explore.asciidoc @@ -0,0 +1,53 @@ +[[java-rest-high-x-pack-graph-explore]] +=== X-Pack Graph explore API + +[[java-rest-high-x-pack-graph-explore-execution]] +==== Initial request + +Graph queries are executed using the `explore()` method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-request] +-------------------------------------------------- +<1> In this example we seed the exploration with a query to find messages mentioning the mysterious `projectx` +<2> What we want to discover in these messages are the ids of `participants` in the communications and the md5 hashes +of any attached files. In each case, we want to find people or files that have had at least one document connecting them +to projectx. +<3> The next "hop" in the graph exploration is to find the people who have shared several messages with the people or files +discovered in the previous hop (the projectx conspirators). The `minDocCount` control is used here to ensure the people +discovered have had at least 5 communications with projectx entities. Note we could also supply a "guiding query" here e.g. a +date range to consider only recent communications but we pass null to consider all connections. +<4> Finally we call the graph explore API with the GraphExploreRequest object. + + +==== Response + +Graph responses consist of Vertex and Connection objects (aka "nodes" and "edges" respectively): + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-response] +-------------------------------------------------- +<1> Each Vertex is a unique term (a combination of fieldname and term value). The "hopDepth" property tells us at which point in the +requested exploration this term was first discovered. 
+<2> Each Connection is a pair of Vertex objects and includes a docCount property telling us how many times these two +Vertex terms have been sighted together + + +[[java-rest-high-x-pack-graph-expand-execution]] +==== Expanding a client-side Graph + +Typically once an application has rendered an initial GraphExploreResponse as a collection of vertices and connecting lines (graph visualization toolkits such as D3, sigma.js or Keylines help here) the next step a user may want to do is "expand". This involves finding new vertices that might be connected to the existing ones currently shown. + +To do this we use the same `explore` method but our request contains details about which vertices to expand from and which vertices to avoid re-discovering. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-expand] +-------------------------------------------------- +<1> Unlike the initial request we do not need to pass a starting query +<2> In the first hop which represents our "from" vertices we explicitly list the terms that we already have on-screen and want to expand by using the `addInclude` filter. +We can supply a boost for those terms that are considered more important to follow than others but here we select a common value of 1 for all. +<3> When defining the second hop which represents the "to" vertices we hope to discover we explicitly list the terms that we already know about using the `addExclude` filter + diff --git a/docs/java-rest/high-level/licensing/put-license.asciidoc b/docs/java-rest/high-level/licensing/put-license.asciidoc index a270d658dddb1..945d447317bc1 100644 --- a/docs/java-rest/high-level/licensing/put-license.asciidoc +++ b/docs/java-rest/high-level/licensing/put-license.asciidoc @@ -10,7 +10,7 @@ The license can be added or updated using the `putLicense()` method: -------------------------------------------------- include-tagged::{doc-tests}/LicensingDocumentationIT.java[put-license-execute] -------------------------------------------------- -<1> Set the categories of information to retrieve. The the default is to +<1> Set the categories of information to retrieve. The default is to return no information which is useful for checking if {xpack} is installed but not much else. <2> A JSON document containing the license information. diff --git a/docs/java-rest/high-level/migration.asciidoc b/docs/java-rest/high-level/migration.asciidoc index ad4e0613fc14a..662df0f56403a 100644 --- a/docs/java-rest/high-level/migration.asciidoc +++ b/docs/java-rest/high-level/migration.asciidoc @@ -270,7 +270,7 @@ include-tagged::{doc-tests}/MigrationDocumentationIT.java[migration-cluster-heal helper requires the content type of the response to be passed as an argument and returns a `Map` of objects. Values in the map can be of any type, including inner `Map` that are used to represent the JSON object hierarchy. -<5> Retrieve the value of the `status` field in the response map, casts it as a a `String` +<5> Retrieve the value of the `status` field in the response map, casts it as a `String` object and use the `ClusterHealthStatus.fromString()` method to convert it as a `ClusterHealthStatus` object. This method throws an exception if the value does not corresponds to a valid cluster health status. 
diff --git a/docs/java-rest/high-level/miscellaneous/x-pack-info.asciidoc b/docs/java-rest/high-level/miscellaneous/x-pack-info.asciidoc index f877ed720db69..b432b10d3b8bb 100644 --- a/docs/java-rest/high-level/miscellaneous/x-pack-info.asciidoc +++ b/docs/java-rest/high-level/miscellaneous/x-pack-info.asciidoc @@ -13,7 +13,7 @@ include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[x-pack-info-execut -------------------------------------------------- <1> Enable verbose mode. The default is `false` but `true` will return more information. -<2> Set the categories of information to retrieve. The the default is to +<2> Set the categories of information to retrieve. The default is to return no information which is useful for checking if {xpack} is installed but not much else. diff --git a/docs/java-rest/high-level/ml/close-job.asciidoc b/docs/java-rest/high-level/ml/close-job.asciidoc new file mode 100644 index 0000000000000..edadb9f40a214 --- /dev/null +++ b/docs/java-rest/high-level/ml/close-job.asciidoc @@ -0,0 +1,59 @@ +[[java-rest-high-x-pack-ml-close-job]] +=== Close Job API + +The Close Job API provides the ability to close {ml} jobs in the cluster. +It accepts a `CloseJobRequest` object and responds +with a `CloseJobResponse` object. + +[[java-rest-high-x-pack-ml-close-job-request]] +==== Close Job Request + +A `CloseJobRequest` object gets created with an existing non-null `jobId`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-request] +-------------------------------------------------- +<1> Constructing a new request referencing existing job IDs +<2> Optionally used to close a failed job, or to forcefully close a job +which has not responded to its initial close request. +<3> Optionally set to ignore if a wildcard expression matches no jobs. + (This includes `_all` string or when no jobs have been specified) +<4> Optionally setting the `timeout` value for how long the +execution should wait for the job to be closed. + +[[java-rest-high-x-pack-ml-close-job-execution]] +==== Execution + +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-execute] +-------------------------------------------------- +<1> `isClosed()` from the `CloseJobResponse` indicates if the job was successfully +closed or not. + +[[java-rest-high-x-pack-ml-close-job-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-execute-async] +-------------------------------------------------- +<1> The `CloseJobRequest` to execute and the `ActionListener` to use when +the execution completes + +The method does not block and returns immediately. The passed `ActionListener` is used +to notify the caller of completion. 
A typical `ActionListener` for `CloseJobResponse` may +look like + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs diff --git a/docs/java-rest/high-level/ml/delete-job.asciidoc b/docs/java-rest/high-level/ml/delete-job.asciidoc new file mode 100644 index 0000000000000..44a6a47940955 --- /dev/null +++ b/docs/java-rest/high-level/ml/delete-job.asciidoc @@ -0,0 +1,49 @@ +[[java-rest-high-x-pack-ml-delete-job]] +=== Delete Job API + +[[java-rest-high-x-pack-machine-learning-delete-job-request]] +==== Delete Job Request + +A `DeleteJobRequest` object requires a non-null `jobId` and can optionally set `force`. +Can be executed as follows: + +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-job-request] +--------------------------------------------------- +<1> Use to forcefully delete an opened job; +this method is quicker than closing and deleting the job. +Defaults to `false` + +[[java-rest-high-x-pack-machine-learning-delete-job-response]] +==== Delete Job Response + +The returned `DeleteJobResponse` object indicates the acknowledgement of the request: +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-job-response] +--------------------------------------------------- +<1> `isAcknowledged` was the deletion request acknowledged or not + +[[java-rest-high-x-pack-machine-learning-delete-job-async]] +==== Delete Job Asynchronously + +This request can also be made asynchronously. +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-job-request-async] +--------------------------------------------------- +<1> The `DeleteJobRequest` to execute and the `ActionListener` to alert on completion or error. + +The deletion request returns immediately. Once the request is completed, the `ActionListener` is +called back using the `onResponse` or `onFailure`. The latter indicates some failure occurred when +making the request. + +A typical listener for a `DeleteJobRequest` could be defined as follows: + +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-job-request-listener] +--------------------------------------------------- +<1> The action to be taken when it is completed +<2> What to do when a failure occurs diff --git a/docs/java-rest/high-level/ml/flush-job.asciidoc b/docs/java-rest/high-level/ml/flush-job.asciidoc new file mode 100644 index 0000000000000..1f815bba0d564 --- /dev/null +++ b/docs/java-rest/high-level/ml/flush-job.asciidoc @@ -0,0 +1,83 @@ +[[java-rest-high-x-pack-ml-flush-job]] +=== Flush Job API + +The Flush Job API provides the ability to flush a {ml} job's +datafeed in the cluster. +It accepts a `FlushJobRequest` object and responds +with a `FlushJobResponse` object. 
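Before the individual request options are described, the following minimal sketch shows the overall shape of a flush call. It is an illustration only: the job id, the `client` variable, and the `flushJob` method name are assumptions, and the canonical snippets are the tagged examples in the sections that follow.

["source","java"]
--------------------------------------------------
// Minimal sketch, assuming an existing RestHighLevelClient named `client` and a
// job with id "my-job"; the flushJob method name is an assumption.
FlushJobRequest request = new FlushJobRequest("my-job");   // the jobId must be non-null
FlushJobResponse response =
        client.machineLearningClient().flushJob(request, RequestOptions.DEFAULT);
--------------------------------------------------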
+ +[[java-rest-high-x-pack-ml-flush-job-request]] +==== Flush Job Request + +A `FlushJobRequest` object gets created with an existing non-null `jobId`. +All other fields are optional for the request. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-request] +-------------------------------------------------- +<1> Constructing a new request referencing an existing `jobId` + +==== Optional Arguments + +The following arguments are optional. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-request-options] +-------------------------------------------------- +<1> Set request to calculate the interim results +<2> Set the advanced time to flush to the particular time value +<3> Set the start time for the range of buckets on which +to calculate the interim results (requires `calc_interim` to be `true`) +<4> Set the end time for the range of buckets on which +to calculate interim results (requires `calc_interim` to be `true`) +<5> Set the skip time to skip a particular time value + +[[java-rest-high-x-pack-ml-flush-job-execution]] +==== Execution + +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-execute] +-------------------------------------------------- + +[[java-rest-high-x-pack-ml-flush-job-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-execute-async] +-------------------------------------------------- +<1> The `FlushJobRequest` to execute and the `ActionListener` to use when +the execution completes + +The method does not block and returns immediately. The passed `ActionListener` is used +to notify the caller of completion. A typical `ActionListener` for `FlushJobResponse` may +look like + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs + +[[java-rest-high-x-pack-ml-flush-job-response]] +==== Flush Job Response + +A `FlushJobResponse` contains an acknowledgement and an optional end date for the +last finalized bucket + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-response] +-------------------------------------------------- +<1> `isFlushed()` indicates if the job was successfully flushed or not. +<2> `getLastFinalizedBucketEnd()` provides the timestamp +(in milliseconds-since-the-epoch) of the end of the last bucket that was processed. 
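As a small illustration of consuming these two fields, a sketch such as the following could be used. The `response` variable is assumed to be the `FlushJobResponse` obtained above, and the getter is assumed here to return a `java.util.Date`; adjust the conversion if the client exposes the timestamp as epoch milliseconds directly.

["source","java"]
--------------------------------------------------
// Sketch of acting on a FlushJobResponse named `response` (an assumption).
if (response.isFlushed()) {
    Date lastBucketEnd = response.getLastFinalizedBucketEnd();
    if (lastBucketEnd != null) {   // assumed to be absent until a bucket has been finalized
        System.out.println("Flushed; last finalized bucket ended at " + lastBucketEnd.getTime());
    }
}
--------------------------------------------------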
\ No newline at end of file diff --git a/docs/java-rest/high-level/ml/get-buckets.asciidoc b/docs/java-rest/high-level/ml/get-buckets.asciidoc new file mode 100644 index 0000000000000..81a21d3d18ac1 --- /dev/null +++ b/docs/java-rest/high-level/ml/get-buckets.asciidoc @@ -0,0 +1,125 @@ +[[java-rest-high-x-pack-ml-get-buckets]] +=== Get Buckets API + +The Get Buckets API retrieves one or more bucket results. +It accepts a `GetBucketsRequest` object and responds +with a `GetBucketsResponse` object. + +[[java-rest-high-x-pack-ml-get-buckets-request]] +==== Get Buckets Request + +A `GetBucketsRequest` object gets created with an existing non-null `jobId`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-request] +-------------------------------------------------- +<1> Constructing a new request referencing an existing `jobId` + +==== Optional Arguments +The following arguments are optional: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-timestamp] +-------------------------------------------------- +<1> The timestamp of the bucket to get. Otherwise it will return all buckets. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-anomaly-score] +-------------------------------------------------- +<1> Buckets with anomaly scores greater or equal than this value will be returned. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-desc] +-------------------------------------------------- +<1> If `true`, the buckets are sorted in descending order. Defaults to `false`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-end] +-------------------------------------------------- +<1> Buckets with timestamps earlier than this time will be returned. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-exclude-interim] +-------------------------------------------------- +<1> If `true`, interim results will be excluded. Defaults to `false`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-expand] +-------------------------------------------------- +<1> If `true`, buckets will include their anomaly records. Defaults to `false`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-page] +-------------------------------------------------- +<1> The page parameters `from` and `size`. `from` specifies the number of buckets to skip. +`size` specifies the maximum number of buckets to get. Defaults to `0` and `100` respectively. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-sort] +-------------------------------------------------- +<1> The field to sort buckets on. Defaults to `timestamp`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-end] +-------------------------------------------------- +<1> Buckets with timestamps on or after this time will be returned. + +[[java-rest-high-x-pack-ml-get-buckets-execution]] +==== Execution + +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-execute] +-------------------------------------------------- + + +[[java-rest-high-x-pack-ml-get-buckets-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-execute-async] +-------------------------------------------------- +<1> The `GetBucketsRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back with the `onResponse` method +if the execution is successful or the `onFailure` method if the execution +failed. + +A typical listener for `GetBucketsResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs + +[[java-rest-high-snapshot-ml-get-buckets-response]] +==== Get Buckets Response + +The returned `GetBucketsResponse` contains the requested buckets: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-response] +-------------------------------------------------- +<1> The count of buckets that were matched +<2> The buckets retrieved \ No newline at end of file diff --git a/docs/java-rest/high-level/ml/get-job-stats.asciidoc b/docs/java-rest/high-level/ml/get-job-stats.asciidoc new file mode 100644 index 0000000000000..90f7794ae765b --- /dev/null +++ b/docs/java-rest/high-level/ml/get-job-stats.asciidoc @@ -0,0 +1,67 @@ +[[java-rest-high-x-pack-ml-get-job-stats]] +=== Get Job Stats API + +The Get Job Stats API provides the ability to get any number of + {ml} job's statistics in the cluster. +It accepts a `GetJobStatsRequest` object and responds +with a `GetJobStatsResponse` object. + +[[java-rest-high-x-pack-ml-get-job-stats-request]] +==== Get Job Stats Request + +A `GetJobsStatsRequest` object can have any number of `jobId` +entries. However, they all must be non-null. 
An empty list is the same as +requesting statistics for all jobs. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-request] +-------------------------------------------------- +<1> Constructing a new request referencing existing `jobIds`, can contain wildcards +<2> Whether to ignore if a wildcard expression matches no jobs. + (This includes `_all` string or when no jobs have been specified) + +[[java-rest-high-x-pack-ml-get-job-stats-execution]] +==== Execution + +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-execute] +-------------------------------------------------- + +[[java-rest-high-x-pack-ml-get-job-stats-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-execute-async] +-------------------------------------------------- +<1> The `GetJobsStatsRequest` to execute and the `ActionListener` to use when +the execution completes + +The method does not block and returns immediately. The passed `ActionListener` is used +to notify the caller of completion. A typical `ActionListener` for `GetJobsStatsResponse` may +look like + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs + +[[java-rest-high-x-pack-ml-get-job-stats-response]] +==== Get Job Stats Response +The returned `GetJobStatsResponse` contains the requested job statistics: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-response] +-------------------------------------------------- +<1> `getCount()` indicates the number of job statistics found +<2> `getJobStats()` is the collection of {ml} `JobStats` objects found \ No newline at end of file diff --git a/docs/java-rest/high-level/ml/get-job.asciidoc b/docs/java-rest/high-level/ml/get-job.asciidoc new file mode 100644 index 0000000000000..4ecf70e8e6538 --- /dev/null +++ b/docs/java-rest/high-level/ml/get-job.asciidoc @@ -0,0 +1,57 @@ +[[java-rest-high-x-pack-ml-get-job]] +=== Get Job API + +The Get Job API provides the ability to get {ml} jobs in the cluster. +It accepts a `GetJobRequest` object and responds +with a `GetJobResponse` object. + +[[java-rest-high-x-pack-ml-get-job-request]] +==== Get Job Request + +A `GetJobRequest` object can have any number of `jobId` or `groupName` +entries. However, they all must be non-null. An empty list is the same as +requesting all jobs.
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-request] +-------------------------------------------------- +<1> Constructing a new request referencing existing `jobIds`, can contain wildcards +<2> Whether to ignore if a wildcard expression matches no jobs. + (This includes `_all` string or when no jobs have been specified) + +[[java-rest-high-x-pack-ml-get-job-execution]] +==== Execution + +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-execute] +-------------------------------------------------- +<1> `getCount()` from the `GetJobResponse` indicates the number of jobs found +<2> `getJobs()` is the collection of {ml} `Job` objects found + +[[java-rest-high-x-pack-ml-get-job-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-execute-async] +-------------------------------------------------- +<1> The `GetJobRequest` to execute and the `ActionListener` to use when +the execution completes + +The method does not block and returns immediately. The passed `ActionListener` is used +to notify the caller of completion. A typical `ActionListener` for `GetJobResponse` may +look like + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs diff --git a/docs/java-rest/high-level/ml/get-records.asciidoc b/docs/java-rest/high-level/ml/get-records.asciidoc new file mode 100644 index 0000000000000..40cc185225ee8 --- /dev/null +++ b/docs/java-rest/high-level/ml/get-records.asciidoc @@ -0,0 +1,113 @@ +[[java-rest-high-x-pack-ml-get-records]] +=== Get Records API + +The Get Records API retrieves one or more record results. +It accepts a `GetRecordsRequest` object and responds +with a `GetRecordsResponse` object. + +[[java-rest-high-x-pack-ml-get-records-request]] +==== Get Records Request + +A `GetRecordsRequest` object gets created with an existing non-null `jobId`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-request] +-------------------------------------------------- +<1> Constructing a new request referencing an existing `jobId` + +==== Optional Arguments +The following arguments are optional: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-desc] +-------------------------------------------------- +<1> If `true`, the records are sorted in descending order. Defaults to `false`. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-end] +-------------------------------------------------- +<1> Records with timestamps earlier than this time will be returned. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-exclude-interim] +-------------------------------------------------- +<1> If `true`, interim results will be excluded. Defaults to `false`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-page] +-------------------------------------------------- +<1> The page parameters `from` and `size`. `from` specifies the number of records to skip. +`size` specifies the maximum number of records to get. Defaults to `0` and `100` respectively. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-record-score] +-------------------------------------------------- +<1> Records with record_score greater or equal than this value will be returned. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-sort] +-------------------------------------------------- +<1> The field to sort records on. Defaults to `influencer_score`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-end] +-------------------------------------------------- +<1> Records with timestamps on or after this time will be returned. + +[[java-rest-high-x-pack-ml-get-records-execution]] +==== Execution + +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-execute] +-------------------------------------------------- + + +[[java-rest-high-x-pack-ml-get-records-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-execute-async] +-------------------------------------------------- +<1> The `GetRecordsRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back with the `onResponse` method +if the execution is successful or the `onFailure` method if the execution +failed. 
+ +A typical listener for `GetRecordsResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs + +[[java-rest-high-snapshot-ml-get-records-response]] +==== Get Records Response + +The returned `GetRecordsResponse` contains the requested records: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-records-response] +-------------------------------------------------- +<1> The count of records that were matched +<2> The records retrieved \ No newline at end of file diff --git a/docs/java-rest/high-level/ml/open-job.asciidoc b/docs/java-rest/high-level/ml/open-job.asciidoc index ad575121818bc..be6a518df193f 100644 --- a/docs/java-rest/high-level/ml/open-job.asciidoc +++ b/docs/java-rest/high-level/ml/open-job.asciidoc @@ -44,7 +44,7 @@ include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-open-job-exec the execution completes The method does not block and returns immediately. The passed `ActionListener` is used -to notify the caller of completion. A typical `ActionListner` for `OpenJobResponse` may +to notify the caller of completion. A typical `ActionListener` for `OpenJobResponse` may look like ["source","java",subs="attributes,callouts,macros"] diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index a2db3436317c3..b791dbc0f8cfc 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -15,6 +15,8 @@ Single document APIs:: Multi-document APIs:: * <> * <> +* <> +* <> include::document/index.asciidoc[] include::document/get.asciidoc[] @@ -23,6 +25,8 @@ include::document/delete.asciidoc[] include::document/update.asciidoc[] include::document/bulk.asciidoc[] include::document/multi-get.asciidoc[] +include::document/reindex.asciidoc[] +include::document/update-by-query.asciidoc[] == Search APIs @@ -205,10 +209,24 @@ include::licensing/delete-license.asciidoc[] The Java High Level REST Client supports the following Machine Learning APIs: * <> +* <> +* <> * <> +* <> +* <> +* <> +* <> +* <> include::ml/put-job.asciidoc[] +include::ml/get-job.asciidoc[] +include::ml/delete-job.asciidoc[] include::ml/open-job.asciidoc[] +include::ml/close-job.asciidoc[] +include::ml/flush-job.asciidoc[] +include::ml/get-job-stats.asciidoc[] +include::ml/get-buckets.asciidoc[] +include::ml/get-records.asciidoc[] == Migration APIs @@ -227,3 +245,11 @@ The Java High Level REST Client supports the following Watcher APIs: include::watcher/put-watch.asciidoc[] include::watcher/delete-watch.asciidoc[] + +== Graph APIs + +The Java High Level REST Client supports the following Graph APIs: + +* <> + +include::graph/explore.asciidoc[] diff --git a/docs/painless/painless-debugging.asciidoc b/docs/painless/painless-debugging.asciidoc index 8523116616d18..c141cbc53252a 100644 --- a/docs/painless/painless-debugging.asciidoc +++ b/docs/painless/painless-debugging.asciidoc @@ -5,7 +5,7 @@ Painless doesn't have a https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop[REPL] -and while it'd be nice for it to have 
one one day, it wouldn't tell you the +and while it'd be nice for it to have one day, it wouldn't tell you the whole story around debugging painless scripts embedded in Elasticsearch because the data that the scripts have access to or "context" is so important. For now the best way to debug embedded scripts is by throwing exceptions at choice diff --git a/docs/painless/painless-execute-script.asciidoc b/docs/painless/painless-execute-script.asciidoc index 2aca959778699..30320def79b2d 100644 --- a/docs/painless/painless-execute-script.asciidoc +++ b/docs/painless/painless-execute-script.asciidoc @@ -26,7 +26,7 @@ The only variable that is available is `params`, which can be used to access use The result of the script is always converted to a string. If no context is specified then this context is used by default. -====== Example +*Example* Request: @@ -67,7 +67,7 @@ The following parameters may be specified in `context_setup` for a filter contex document:: Contains the document that will be temporarily indexed in-memory and is accessible from the script. index:: The name of an index containing a mapping that is compatable with the document being indexed. -====== Example +*Example* [source,js] ---------------------------------------------------------------- @@ -125,7 +125,7 @@ document:: Contains the document that will be temporarily indexed in-memory and index:: The name of an index containing a mapping that is compatable with the document being indexed. query:: If `_score` is used in the script then a query can specified that will be used to compute a score. -====== Example +*Example* [source,js] ---------------------------------------------------------------- diff --git a/docs/painless/painless-operators-array.asciidoc b/docs/painless/painless-operators-array.asciidoc index acfb87d30af1b..e80a863df2747 100644 --- a/docs/painless/painless-operators-array.asciidoc +++ b/docs/painless/painless-operators-array.asciidoc @@ -254,7 +254,7 @@ and `]` tokens. *Errors* * If a value other than an `int` type value or a value that is castable to an - `int` type value is specified for for a dimension's size. + `int` type value is specified for a dimension's size. *Grammar* diff --git a/docs/plugins/discovery-file.asciidoc b/docs/plugins/discovery-file.asciidoc index ad06cfc0cc5f1..4f2182da056a0 100644 --- a/docs/plugins/discovery-file.asciidoc +++ b/docs/plugins/discovery-file.asciidoc @@ -1,71 +1,14 @@ [[discovery-file]] === File-Based Discovery Plugin -The file-based discovery plugin uses a list of hosts/ports in a `unicast_hosts.txt` file -in the `config/discovery-file` directory for unicast discovery. +The functionality provided by the `discovery-file` plugin is now available in +Elasticsearch without requiring a plugin. This plugin still exists to ensure +backwards compatibility, but it will be removed in a future version. + +On installation, this plugin creates a file at +`$ES_PATH_CONF/discovery-file/unicast_hosts.txt` that comprises comments that +describe how to use it. It is preferable not to install this plugin and instead +to create this file, and its containing directory, using standard tools. :plugin_name: discovery-file include::install_remove.asciidoc[] - -[[discovery-file-usage]] -[float] -==== Using the file-based discovery plugin - -The file-based discovery plugin provides the ability to specify the -unicast hosts list through a simple `unicast_hosts.txt` file that can -be dynamically updated at any time. 
To enable, add the following in `elasticsearch.yml`: - -[source,yaml] ----- -discovery.zen.hosts_provider: file ----- - -This plugin simply provides a facility to supply the unicast hosts list for -zen discovery through an external file that can be updated at any time by a side process. - -For example, this gives a convenient mechanism for an Elasticsearch instance -that is run in docker containers to be dynamically supplied a list of IP -addresses to connect to for zen discovery when those IP addresses may not be -known at node startup. - -Note that the file-based discovery plugin is meant to augment the unicast -hosts list in `elasticsearch.yml` (if specified), not replace it. Therefore, -if there are valid unicast host entries in `discovery.zen.ping.unicast.hosts`, -they will be used in addition to those supplied in `unicast_hosts.txt`. - -Anytime a change is made to the `unicast_hosts.txt` file, even as Elasticsearch -continues to run, the new changes will be picked up by the plugin and the -new hosts list will be used for the next pinging round for master election. - -Upon installation of the plugin, a default `unicast_hosts.txt` file will -be found in the `$CONFIG_DIR/discovery-file` directory. This default file -will contain some comments about what the file should contain. All comments -for this file must appear on their lines starting with `#` (i.e. comments -cannot start in the middle of a line). - -[[discovery-file-format]] -[float] -==== unicast_hosts.txt file format - -The format of the file is to specify one unicast host entry per line. -Each unicast host entry consists of the host (host name or IP address) and -an optional transport port number. If the port number is specified, is must -come immediately after the host (on the same line) separated by a `:`. -If the port number is not specified, a default value of 9300 is used. - -For example, this is an example of `unicast_hosts.txt` for a cluster with -four nodes that participate in unicast discovery, some of which are not -running on the default port: - -[source,txt] ----------------------------------------------------------------- -10.10.10.5 -10.10.10.6:9305 -10.10.10.5:10005 -# an IPv6 address -[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301 ----------------------------------------------------------------- - -Host names are allowed instead of IP addresses (similar to -`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be -specified in brackets with the port coming after the brackets. diff --git a/docs/plugins/integrations.asciidoc b/docs/plugins/integrations.asciidoc index 90f2c685fdaeb..8bffe5193ed7b 100644 --- a/docs/plugins/integrations.asciidoc +++ b/docs/plugins/integrations.asciidoc @@ -17,14 +17,11 @@ Integrations are not plugins, but are external tools or modules that make it eas * https://drupal.org/project/elasticsearch_connector[Drupal]: Drupal Elasticsearch integration. -* https://wordpress.org/plugins/wpsolr-search-engine/[WPSOLR]: - Elasticsearch (and Apache Solr) WordPress Plugin - -* http://searchbox-io.github.com/wp-elasticsearch/[Wp-Elasticsearch]: +* https://wordpress.org/plugins/elasticpress/[ElasticPress]: Elasticsearch WordPress Plugin -* https://github.com/wallmanderco/elasticsearch-indexer[Elasticsearch Indexer]: - Elasticsearch WordPress Plugin +* https://wordpress.org/plugins/wpsolr-search-engine/[WPSOLR]: + Elasticsearch (and Apache Solr) WordPress Plugin * https://doc.tiki.org/Elasticsearch[Tiki Wiki CMS Groupware]: Tiki has native support for Elasticsearch. 
This provides faster & better diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc index 8292325738506..e3978e65f4476 100644 --- a/docs/plugins/repository-gcs.asciidoc +++ b/docs/plugins/repository-gcs.asciidoc @@ -10,71 +10,66 @@ include::install_remove.asciidoc[] [[repository-gcs-usage]] ==== Getting started -The plugin uses the https://cloud.google.com/storage/docs/json_api/[Google Cloud Storage JSON API] (v1) -to connect to the Storage service. If this is the first time you use Google Cloud Storage, you first -need to connect to the https://console.cloud.google.com/[Google Cloud Platform Console] and create a new -project. Once your project is created, you must enable the Cloud Storage Service for your project. +The plugin uses the https://github.com/GoogleCloudPlatform/google-cloud-java/tree/master/google-cloud-clients/google-cloud-storage[Google Cloud Java Client for Storage] +to connect to the Storage service. If you are using +https://cloud.google.com/storage/[Google Cloud Storage] for the first time, you +must connect to the https://console.cloud.google.com/[Google Cloud Platform Console] +and create a new project. After your project is created, you must enable the +Cloud Storage Service for your project. [[repository-gcs-creating-bucket]] ===== Creating a Bucket -Google Cloud Storage service uses the concept of https://cloud.google.com/storage/docs/key-terms[Bucket] -as a container for all the data. Buckets are usually created using the -https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin will not automatically -create buckets. +The Google Cloud Storage service uses the concept of a +https://cloud.google.com/storage/docs/key-terms[bucket] as a container for all +the data. Buckets are usually created using the +https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin +does not automatically create buckets. To create a new bucket: -1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console] -2. Select your project -3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser] -4. Click the "Create Bucket" button -5. Enter the name of the new bucket -6. Select a storage class -7. Select a location -8. Click the "Create" button +1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]. +2. Select your project. +3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser]. +4. Click the *Create Bucket* button. +5. Enter the name of the new bucket. +6. Select a storage class. +7. Select a location. +8. Click the *Create* button. -The bucket should now be created. +For more detailed instructions, see the +https://cloud.google.com/storage/docs/quickstart-console#create_a_bucket[Google Cloud documentation]. [[repository-gcs-service-authentication]] ===== Service Authentication -The plugin supports two authentication modes: - -* The built-in <>. This mode is -recommended if your Elasticsearch node is running on a Compute Engine virtual machine. - -* Specifying <> credentials. - -[[repository-gcs-using-compute-engine]] -===== Using Compute Engine -When running on Compute Engine, the plugin use Google's built-in authentication mechanism to -authenticate on the Storage service. Compute Engine virtual machines are usually associated to a -default service account. This service account can be found in the VM instance details in the -https://console.cloud.google.com/compute/[Compute Engine console]. 
- -This is the default authentication mode and requires no configuration. - -NOTE: The Compute Engine VM must be allowed to use the Storage service. This can be done only at VM -creation time, when "Storage" access can be configured to "Read/Write" permission. Check your -instance details at the section "Cloud API access scopes". +The plugin must authenticate the requests it makes to the Google Cloud Storage +service. It is common for Google client libraries to employ a strategy named https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application[application default credentials]. +However, that strategy is **not** supported for use with Elasticsearch. The +plugin operates under the Elasticsearch process, which runs with the security +manager enabled. The security manager obstructs the "automatic" credential discovery. +Therefore, you must configure <> +credentials even if you are using an environment that does not normally require +this configuration (such as Compute Engine, Kubernetes Engine or App Engine). [[repository-gcs-using-service-account]] ===== Using a Service Account -If your Elasticsearch node is not running on Compute Engine, or if you don't want to use Google's -built-in authentication mechanism, you can authenticate on the Storage service using a -https://cloud.google.com/iam/docs/overview#service_account[Service Account] file. +You have to obtain and provide https://cloud.google.com/iam/docs/overview#service_account[service account credentials] +manually. + +For detailed information about generating JSON service account files, see the https://cloud.google.com/storage/docs/authentication?hl=en#service_accounts[Google Cloud documentation]. +Note that the PKCS12 format is not supported by this plugin. -To create a service account file: +Here is a summary of the steps: -1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console] -2. Select your project -3. Got to the https://console.cloud.google.com/permissions[Permission] tab -4. Select the https://console.cloud.google.com/permissions/serviceaccounts[Service Accounts] tab -5. Click on "Create service account" -6. Once created, select the new service account and download a JSON key file +1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]. +2. Select your project. +3. Go to the https://console.cloud.google.com/permissions[Permission] tab. +4. Select the https://console.cloud.google.com/permissions/serviceaccounts[Service Accounts] tab. +5. Click *Create service account*. +6. After the account is created, select it and download a JSON key file. -A service account file looks like this: +A JSON service account file looks like this: [source,js] ---- @@ -84,19 +79,26 @@ A service account file looks like this: "private_key_id": "...", "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n", "client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com", - "client_id": "..."
+ "client_id": "...", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/your-bucket@your-project-id.iam.gserviceaccount.com" } ---- // NOTCONSOLE -This file must be stored in the {ref}/secure-settings.html[elasticsearch keystore], under a setting name -of the form `gcs.client.NAME.credentials_file`, where `NAME` is the name of the client configuration. -The default client name is `default`, but a different client name can be specified in repository -settings using `client`. +To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore]. You must add a setting name of the form `gcs.client.NAME.credentials_file`, where `NAME` +is the name of the client configuration for the repository. The implicit client +name is `default`, but a different client name can be specified in the +repository settings with the `client` key. -For example, if specifying the credentials file in the keystore under -`gcs.client.my_alternate_client.credentials_file`, you can configure a repository to use these -credentials like this: +NOTE: Passing the file path via the GOOGLE_APPLICATION_CREDENTIALS environment +variable is **not** supported. + +For example, if you added a `gcs.client.my_alternate_client.credentials_file` +setting in the keystore, you can configure a repository to use those credentials +like this: [source,js] ---- @@ -113,19 +115,18 @@ PUT _snapshot/my_gcs_repository // TEST[skip:we don't have gcs setup while testing this] The `credentials_file` settings are {ref}/secure-settings.html#reloadable-secure-settings[reloadable]. -After you reload the settings, the internal `gcs` clients, used to transfer the -snapshot contents, will utilize the latest settings from the keystore. - +After you reload the settings, the internal `gcs` clients, which are used to +transfer the snapshot contents, utilize the latest settings from the keystore. -NOTE: In progress snapshot/restore jobs will not be preempted by a *reload* -of the client's `credentials_file` settings. They will complete using the client -as it was built when the operation started. +NOTE: Snapshot or restore jobs that are in progress are not preempted by a *reload* +of the client's `credentials_file` settings. They complete using the client as +it was built when the operation started. [[repository-gcs-client]] ==== Client Settings The client used to connect to Google Cloud Storage has a number of settings available. -Client setting names are of the form `gcs.client.CLIENT_NAME.SETTING_NAME` and specified +Client setting names are of the form `gcs.client.CLIENT_NAME.SETTING_NAME` and are specified inside `elasticsearch.yml`. The default client name looked up by a `gcs` repository is called `default`, but can be customized with the repository setting `client`. @@ -146,7 +147,7 @@ PUT _snapshot/my_gcs_repository // TEST[skip:we don't have gcs setup while testing this] Some settings are sensitive and must be stored in the -{ref}/secure-settings.html[elasticsearch keystore]. This is the case for the service account file: +{ref}/secure-settings.html[Elasticsearch keystore]. This is the case for the service account file: [source,sh] ---- @@ -185,7 +186,7 @@ are marked as `Secure`. `project_id`:: - The Google Cloud project id. 
This will be automatically infered from the credentials file but + The Google Cloud project id. This will be automatically inferred from the credentials file but can be specified explicitly. For example, it can be used to switch between projects when the same credentials are usable for both the production and the development projects. @@ -248,8 +249,8 @@ The following settings are supported: The service account used to access the bucket must have the "Writer" access to the bucket: -1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console] -2. Select your project -3. Got to the https://console.cloud.google.com/storage/browser[Storage Browser] -4. Select the bucket and "Edit bucket permission" -5. The service account must be configured as a "User" with "Writer" access +1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]. +2. Select your project. +3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser]. +4. Select the bucket and "Edit bucket permission". +5. The service account must be configured as a "User" with "Writer" access. diff --git a/docs/reference/aggregations/bucket/children-aggregation.asciidoc b/docs/reference/aggregations/bucket/children-aggregation.asciidoc index 3805b2e564ca4..e2b3c8ec59176 100644 --- a/docs/reference/aggregations/bucket/children-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/children-aggregation.asciidoc @@ -144,7 +144,7 @@ Possible response: }, "hits": { "total": 3, - "max_score": 0.0, + "max_score": null, "hits": [] }, "aggregations": { diff --git a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc index b6595c0d05cc1..0a8a46a0b67b6 100644 --- a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc @@ -433,8 +433,8 @@ Scripts can be inline (as in above example), indexed or stored on disk. For deta Available parameters in the script are [horizontal] -`_subset_freq`:: Number of documents the term appears in in the subset. -`_superset_freq`:: Number of documents the term appears in in the superset. +`_subset_freq`:: Number of documents the term appears in the subset. +`_superset_freq`:: Number of documents the term appears in the superset. `_subset_size`:: Number of documents in the subset. `_superset_size`:: Number of documents in the superset. diff --git a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc index b05c56b880560..09fa707c5db3b 100644 --- a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc @@ -307,7 +307,7 @@ POST /_search ===== stdDev Function -This function accepts a collection of doubles and and average, then returns the standard deviation of the values in that window. +This function accepts a collection of doubles and an average, then returns the standard deviation of the values in that window. `null` and `NaN` values are ignored; the sum is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `0.0` is returned as the result.
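To make the described behaviour concrete, the computation is roughly equivalent to the plain-Java sketch below. This is an illustration of the formula only, not the actual implementation used by the aggregation.

["source","java"]
--------------------------------------------------
// Standard deviation over a window, given the precomputed average:
// NaN values are ignored and 0.0 is returned when no real values remain.
static double stdDev(double[] window, double avg) {
    double sumOfSqrDiff = 0.0;
    long count = 0;
    for (double value : window) {
        if (Double.isNaN(value) == false) {
            sumOfSqrDiff += (value - avg) * (value - avg);
            count++;
        }
    }
    return count == 0 ? 0.0 : Math.sqrt(sumOfSqrDiff / count);
}
--------------------------------------------------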
diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index eab74f1f9f0d2..d1ea1fad88515 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -22,7 +22,7 @@ node-0 flush 0 0 0 ... node-0 write 0 0 0 -------------------------------------------------- -// TESTRESPONSE[s/\.\.\./(node-0 \\S+ 0 0 0\n)+/] +// TESTRESPONSE[s/\.\.\./(node-0 \\S+ 0 0 0\n)*/] // TESTRESPONSE[s/\d+/\\d+/ _cat] // The substitutions do two things: // 1. Expect any number of extra thread pools. This allows us to only list a diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc index f093b6ebcfae0..f92e364bae102 100644 --- a/docs/reference/cluster.asciidoc +++ b/docs/reference/cluster.asciidoc @@ -6,23 +6,70 @@ ["float",id="cluster-nodes"] == Node specification -Most cluster level APIs allow to specify which nodes to execute on (for -example, getting the node stats for a node). Nodes can be identified in -the APIs either using their internal node id, the node name, address, -custom attributes, or just the `_local` node receiving the request. For -example, here are some sample executions of nodes info: +Some cluster-level APIs may operate on a subset of the nodes which can be +specified with _node filters_. For example, the <>, +<>, and <> APIs +can all report results from a filtered set of nodes rather than from all nodes. + +_Node filters_ are written as a comma-separated list of individual filters, +each of which adds or removes nodes from the chosen subset. Each filter can be +one of the following: + +* `_all`, to add all nodes to the subset. +* `_local`, to add the local node to the subset. +* `_master`, to add the currently-elected master node to the subset. +* a node id or name, to add this node to the subset. +* an IP address or hostname, to add all matching nodes to the subset. +* a pattern, using `*` wildcards, which adds all nodes to the subset + whose name, address or hostname matches the pattern. +* `master:true`, `data:true`, `ingest:true` or `coordinating_only:true`, which + respectively add to the subset all master-eligible nodes, all data nodes, + all ingest nodes, and all coordinating-only nodes. +* `master:false`, `data:false`, `ingest:false` or `coordinating_only:false`, + which respectively remove from the subset all master-eligible nodes, all data + nodes, all ingest nodes, and all coordinating-only nodes. +* a pair of patterns, using `*` wildcards, of the form `attrname:attrvalue`, + which adds to the subset all nodes with a custom node attribute whose name + and value match the respective patterns. Custom node attributes are + configured by setting properties in the configuration file of the form + `node.attr.attrname: attrvalue`. + +NOTE: node filters run in the order in which they are given, which is important +if using filters that remove nodes from the set. For example +`_all,master:false` means all the nodes except the master-eligible ones, but +`master:false,_all` means the same as `_all` because the `_all` filter runs +after the `master:false` filter. + +NOTE: if no filters are given, the default is to select all nodes. However, if +any filters are given then they run starting with an empty chosen subset. This +means that filters such as `master:false` which remove nodes from the chosen +subset are only useful if they come after some other filters. When used on its +own, `master:false` selects no nodes. + +Here are some examples of the use of node filters with the +<> APIs. 
[source,js] -------------------------------------------------- -# Local +# If no filters are given, the default is to select all nodes +GET /_nodes +# Explicitly select all nodes +GET /_nodes/_all +# Select just the local node GET /_nodes/_local -# Address -GET /_nodes/10.0.0.3,10.0.0.4 -GET /_nodes/10.0.0.* -# Names +# Select the elected master node +GET /_nodes/_master +# Select nodes by name, which can include wildcards GET /_nodes/node_name_goes_here GET /_nodes/node_name_goes_* -# Attributes (set something like node.attr.rack: 2 in the config) +# Select nodes by address, which can include wildcards +GET /_nodes/10.0.0.3,10.0.0.4 +GET /_nodes/10.0.0.* +# Select nodes by role +GET /_nodes/_all,master:false +GET /_nodes/data:true,ingest:true +GET /_nodes/coordinating_only:true +# Select nodes by custom attribute (e.g. with something like `node.attr.rack: 2` in the configuration file) GET /_nodes/rack:2 GET /_nodes/ra*:2 GET /_nodes/ra*:2* diff --git a/docs/reference/cluster/nodes-hot-threads.asciidoc b/docs/reference/cluster/nodes-hot-threads.asciidoc index c8fa2c9bf7c40..541ee51a58adb 100644 --- a/docs/reference/cluster/nodes-hot-threads.asciidoc +++ b/docs/reference/cluster/nodes-hot-threads.asciidoc @@ -1,12 +1,23 @@ [[cluster-nodes-hot-threads]] == Nodes hot_threads -An API allowing to get the current hot threads on each node in the -cluster. Endpoints are `/_nodes/hot_threads`, and -`/_nodes/{nodesIds}/hot_threads`. +This API yields a breakdown of the hot threads on each selected node in the +cluster. Its endpoints are `/_nodes/hot_threads` and +`/_nodes/{nodes}/hot_threads`: -The output is plain text with a breakdown of each node's top hot -threads. Parameters allowed are: +[source,js] +-------------------------------------------------- +GET /_nodes/hot_threads +GET /_nodes/nodeId1,nodeId2/hot_threads +-------------------------------------------------- +// CONSOLE + +The first command gets the hot threads of all the nodes in the cluster. The +second command gets the hot threads of only `nodeId1` and `nodeId2`. Nodes can +be selected using <>. + +The output is plain text with a breakdown of each node's top hot threads. The +allowed parameters are: [horizontal] `threads`:: number of hot threads to provide, defaults to 3. diff --git a/docs/reference/cluster/nodes-reload-secure-settings.asciidoc b/docs/reference/cluster/nodes-reload-secure-settings.asciidoc new file mode 100644 index 0000000000000..f02ac8e46576b --- /dev/null +++ b/docs/reference/cluster/nodes-reload-secure-settings.asciidoc @@ -0,0 +1,55 @@ +[[cluster-nodes-reload-secure-settings]] +== Nodes Reload Secure Settings + +The cluster nodes reload secure settings API is used to re-read the +local node's encrypted keystore. Specifically, it will prompt the keystore +decryption and reading across the cluster. The keystore's plain content is +used to reinitialize all compatible plugins. A compatible plugin can be +reinitialized without restarting the node. The operation is +complete when all compatible plugins have finished reinitializing. Subsequently, +the keystore is closed and any changes to it will not be reflected on the node. + +[source,js] +-------------------------------------------------- +POST _nodes/reload_secure_settings +POST _nodes/nodeId1,nodeId2/reload_secure_settings +-------------------------------------------------- +// CONSOLE +// TEST[setup:node] +// TEST[s/nodeId1,nodeId2/*/] + +The first command reloads the keystore on each node. The second allows you +to selectively target `nodeId1` and `nodeId2`.
The node selection options are +detailed <>. + +Note: It is an error if secure settings are inconsistent across the cluster +nodes, yet this consistency is not enforced whatsoever. Hence, reloading specific +nodes is not standard. It is only justifiable when retrying failed reload operations. + +[float] +[[rest-reload-secure-settings]] +==== REST Reload Secure Settings Response + +The response contains the `nodes` object, which is a map, keyed by the +node id. Each value has the node `name` and an optional `reload_exception` +field. The `reload_exception` field is a serialization of the exception +that was thrown during the reload process, if any. + +[source,js] +-------------------------------------------------- +{ + "_nodes": { + "total": 1, + "successful": 1, + "failed": 0 + }, + "cluster_name": "my_cluster", + "nodes": { + "pQHNt5rXTTWNvUgOrdynKg": { + "name": "node-0" + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"my_cluster"/$body.cluster_name/] +// TESTRESPONSE[s/"pQHNt5rXTTWNvUgOrdynKg"/\$node_name/] diff --git a/docs/reference/cluster/remote-info.asciidoc b/docs/reference/cluster/remote-info.asciidoc index 3dfcc201e7ac4..a53a26873ce98 100644 --- a/docs/reference/cluster/remote-info.asciidoc +++ b/docs/reference/cluster/remote-info.asciidoc @@ -10,7 +10,7 @@ GET /_remote/info ---------------------------------- // CONSOLE -This command returns returns connection and endpoint information keyed by +This command returns connection and endpoint information keyed by the configured remote cluster alias. [float] @@ -25,7 +25,7 @@ the configured remote cluster alias. `num_nodes_connected`:: The number of connected nodes in the remote cluster. -`max_connection_per_cluster`:: +`max_connections_per_cluster`:: The maximum number of connections maintained for the remote cluster. `initial_connect_timeout`:: diff --git a/docs/reference/cluster/reroute.asciidoc b/docs/reference/cluster/reroute.asciidoc index f076a7b83585a..276a43f660d8b 100644 --- a/docs/reference/cluster/reroute.asciidoc +++ b/docs/reference/cluster/reroute.asciidoc @@ -31,7 +31,7 @@ POST /_cluster/reroute // CONSOLE // TEST[skip:doc tests run with only a single node] -It is important to note that that after processing any reroute commands +It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. For example, if the requested allocation includes moving a diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 3de850418719d..78bccc8bd695d 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -213,3 +213,12 @@ Will return, for example: // 3. All of the numbers and strings on the right hand side of *every* field in // the response are ignored. So we're really only asserting things about the // the shape of this response, not the values in it. 
+ +This API can be restricted to a subset of the nodes using the `?nodeId` +parameter, which accepts <>: + +[source,js] +-------------------------------------------------- +GET /_cluster/stats?nodeId=node1,node*,master:false +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc index 2e59da4222436..d6dfa71b76b67 100644 --- a/docs/reference/cluster/tasks.asciidoc +++ b/docs/reference/cluster/tasks.asciidoc @@ -127,7 +127,7 @@ might look like: The new `description` field contains human readable text that identifies the particular request that the task is performing such as identifying the search request being performed by a search task like the example above. Other kinds of -task have have different descriptions, like <> which +task have different descriptions, like <> which has the search and the destination, or <> which just has the number of requests and the destination indices. Many requests will only have an empty description because more detailed information about the request is not diff --git a/docs/reference/commands/certgen.asciidoc b/docs/reference/commands/certgen.asciidoc index 3a8b15fbd28c8..956a4637ed31f 100644 --- a/docs/reference/commands/certgen.asciidoc +++ b/docs/reference/commands/certgen.asciidoc @@ -1,9 +1,9 @@ [role="xpack"] [testenv="gold+"] [[certgen]] -== certgen +== elasticsearch-certgen -deprecated[6.1,Replaced by <>.] +deprecated[6.1,Replaced by <>.] The `elasticsearch-certgen` command simplifies the creation of certificate authorities (CA), certificate signing requests (CSR), and signed certificates diff --git a/docs/reference/commands/index.asciidoc b/docs/reference/commands/index.asciidoc index 164d2fc0e84f0..134ac1edbd017 100644 --- a/docs/reference/commands/index.asciidoc +++ b/docs/reference/commands/index.asciidoc @@ -1,11 +1,11 @@ -[role="xpack"] -[[xpack-commands]] -= {xpack} Commands +[[commands]] += Command line tools [partintro] -- -{xpack} includes commands that help you configure security: +{es} provides the following tools for configuring security and performing other +tasks from the command line: * <> * <> diff --git a/docs/reference/commands/saml-metadata.asciidoc b/docs/reference/commands/saml-metadata.asciidoc index 069c7135c0144..5309f83288f89 100644 --- a/docs/reference/commands/saml-metadata.asciidoc +++ b/docs/reference/commands/saml-metadata.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="gold+"] [[saml-metadata]] -== saml-metadata +== elasticsearch-saml-metadata The `elasticsearch-saml-metadata` command can be used to generate a SAML 2.0 Service Provider Metadata file. diff --git a/docs/reference/commands/setup-passwords.asciidoc b/docs/reference/commands/setup-passwords.asciidoc index a7dcd25d65e0b..e2d4dfdc13d3d 100644 --- a/docs/reference/commands/setup-passwords.asciidoc +++ b/docs/reference/commands/setup-passwords.asciidoc @@ -4,7 +4,7 @@ == elasticsearch-setup-passwords The `elasticsearch-setup-passwords` command sets the passwords for the built-in -`elastic`, `kibana`, `logstash_system`, and `beats_system` users. +`elastic`, `kibana`, `logstash_system`, `beats_system`, and `apm_system` users. 
[float] === Synopsis diff --git a/docs/reference/commands/users-command.asciidoc b/docs/reference/commands/users-command.asciidoc index e53e0815c5d7b..cf678f2138dfb 100644 --- a/docs/reference/commands/users-command.asciidoc +++ b/docs/reference/commands/users-command.asciidoc @@ -1,10 +1,7 @@ [role="xpack"] [testenv="gold+"] [[users-command]] -== Users Command -++++ -users -++++ +== elasticsearch-users If you use file-based user authentication, the `elasticsearch-users` command enables you to add and remove users, assign user roles, and manage passwords. diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 7ba7e2da63369..1cfc122bee402 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -47,7 +47,7 @@ POST test/_doc/1/_update // TEST[continued] We can add a tag to the list of tags (note, if the tag exists, it -will still add it, since its a list): +will still add it, since it's a list): [source,js] -------------------------------------------------- @@ -65,6 +65,28 @@ POST test/_doc/1/_update // CONSOLE // TEST[continued] +We can remove a tag from the list of tags. Note that the Painless function to +`remove` a tag takes as its parameter the array index of the element you wish +to remove, so you need a bit more logic to locate it while avoiding a runtime +error. Note that if the tag was present more than once in the list, this will +remove only one occurrence of it: + +[source,js] +-------------------------------------------------- +POST test/_doc/1/_update +{ + "script" : { + "source": "if (ctx._source.tags.contains(params.tag)) { ctx._source.tags.remove(ctx._source.tags.indexOf(params.tag)) }", + "lang": "painless", + "params" : { + "tag" : "blue" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + In addition to `_source`, the following variables are available through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing` and `_now` (the current timestamp). @@ -172,7 +194,7 @@ the request was ignored. "_index": "test", "_type": "_doc", "_id": "1", - "_version": 6, + "_version": 7, "result": "noop" } -------------------------------------------------- diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index b89021e1cfe59..c69597e74fd61 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -23,7 +23,7 @@ There are a few concepts that are core to Elasticsearch. Understanding these con [float] === Near Realtime (NRT) -Elasticsearch is a near real time search platform. What this means is there is a slight latency (normally one second) from the time you index a document until the time it becomes searchable. +Elasticsearch is a near-realtime search platform. What this means is there is a slight latency (normally one second) from the time you index a document until the time it becomes searchable. [float] === Cluster @@ -59,7 +59,7 @@ In a single cluster, you can define as many indexes as you want. deprecated[6.0.0,See <>] -A type used to be a logical category/partition of your index to allow you to store different types of documents in the same index, eg one type for users, another type for blog posts. It is no longer possible to create multiple types in an index, and the whole concept of types will be removed in a later version. See <> for more. 
+A type used to be a logical category/partition of your index to allow you to store different types of documents in the same index, e.g. one type for users, another type for blog posts. It is no longer possible to create multiple types in an index, and the whole concept of types will be removed in a later version. See <> for more. [float] === Document @@ -93,7 +93,8 @@ Replication is important for two primary reasons: To summarize, each index can be split into multiple shards. An index can also be replicated zero (meaning no replicas) or more times. Once replicated, each index will have primary shards (the original shards that were replicated from) and replica shards (the copies of the primary shards). -The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may change the number of replicas dynamically anytime but you cannot change the number of shards after-the-fact. + +The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may also change the number of replicas dynamically anytime. You can change the number of shards for an existing index using the {ref}/indices-shrink-index.html[`_shrink`] and {ref}/indices-split-index.html[`_split`] APIs, however this is not a trivial task and pre-planning for the correct number of shards is the optimal approach. By default, each index in Elasticsearch is allocated one primary shard and one replica which means that if you have at least two nodes in your cluster, your index will have one primary shard and another replica shard (one complete replica) for a total of two shards per index. @@ -1065,7 +1066,7 @@ In the previous section, we skipped over a little detail called the document sco But queries do not always need to produce scores, in particular when they are only used for "filtering" the document set. Elasticsearch detects these situations and automatically optimizes query execution in order not to compute useless scores. -The {ref}/query-dsl-bool-query.html[`bool` query] that we introduced in the previous section also supports `filter` clauses which allow to use a query to restrict the documents that will be matched by other clauses, without changing how scores are computed. As an example, let's introduce the {ref}/query-dsl-range-query.html[`range` query], which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. +The {ref}/query-dsl-bool-query.html[`bool` query] that we introduced in the previous section also supports `filter` clauses which allow us to use a query to restrict the documents that will be matched by other clauses, without changing how scores are computed. As an example, let's introduce the {ref}/query-dsl-range-query.html[`range` query], which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. This example uses a bool query to return all accounts with balances between 20000 and 30000, inclusive. In other words, we want to find accounts with a balance that is greater than or equal to 20000 and less than or equal to 30000. 
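A sketch of the request described in the paragraph above (assuming the tutorial's `bank` index), where the `range` sits in the `filter` clause so it restricts matches without contributing to the score:

[source,js]
--------------------------------------------------
GET /bank/_search
{
  "query": {
    "bool": {
      "must": { "match_all": {} },
      "filter": {
        "range": {
          "balance": {
            "gte": 20000,
            "lte": 30000
          }
        }
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE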
@@ -1140,7 +1141,7 @@ And the response (partially shown): }, "hits" : { "total" : 1000, - "max_score" : 0.0, + "max_score" : null, "hits" : [ ] }, "aggregations" : { diff --git a/docs/reference/images/msi_installer/msi_installer_configuration.png b/docs/reference/images/msi_installer/msi_installer_configuration.png index 058df78792fbb..36bae6cc5199d 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_configuration.png and b/docs/reference/images/msi_installer/msi_installer_configuration.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_help.png b/docs/reference/images/msi_installer/msi_installer_help.png index 9b63c512100de..458b0821c1f04 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_help.png and b/docs/reference/images/msi_installer/msi_installer_help.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_installing.png b/docs/reference/images/msi_installer/msi_installer_installing.png index 498d927ce2f5e..590c52371a74e 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_installing.png and b/docs/reference/images/msi_installer/msi_installer_installing.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_locations.png b/docs/reference/images/msi_installer/msi_installer_locations.png index a8c816089748d..ba7151e3714de 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_locations.png and b/docs/reference/images/msi_installer/msi_installer_locations.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_no_service.png b/docs/reference/images/msi_installer/msi_installer_no_service.png index d26bb75f265f6..fbe9a0510c795 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_no_service.png and b/docs/reference/images/msi_installer/msi_installer_no_service.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_plugins.png b/docs/reference/images/msi_installer/msi_installer_plugins.png index 3a6901a18e220..e58f426a47dc7 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_plugins.png and b/docs/reference/images/msi_installer/msi_installer_plugins.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_selected_plugins.png b/docs/reference/images/msi_installer/msi_installer_selected_plugins.png index 8a817c81f8dfd..e58f426a47dc7 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_selected_plugins.png and b/docs/reference/images/msi_installer/msi_installer_selected_plugins.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_service.png b/docs/reference/images/msi_installer/msi_installer_service.png index b1445fa2d6af3..c7fae13637b76 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_service.png and b/docs/reference/images/msi_installer/msi_installer_service.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_success.png b/docs/reference/images/msi_installer/msi_installer_success.png index 337cbc9eb509b..3a58467ae18fc 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_success.png and b/docs/reference/images/msi_installer/msi_installer_success.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_uninstall.png b/docs/reference/images/msi_installer/msi_installer_uninstall.png index 26c9bcb7c9a09..6b5b69a576841 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_uninstall.png and 
b/docs/reference/images/msi_installer/msi_installer_uninstall.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_upgrade_configuration.png b/docs/reference/images/msi_installer/msi_installer_upgrade_configuration.png index a72fdf2ff912f..7ca413bb299e4 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_upgrade_configuration.png and b/docs/reference/images/msi_installer/msi_installer_upgrade_configuration.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_upgrade_notice.png b/docs/reference/images/msi_installer/msi_installer_upgrade_notice.png index a4842b8d6d01c..e5ee18b520807 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_upgrade_notice.png and b/docs/reference/images/msi_installer/msi_installer_upgrade_notice.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_upgrade_plugins.png b/docs/reference/images/msi_installer/msi_installer_upgrade_plugins.png index 6399c99baacff..3e7496505f742 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_upgrade_plugins.png and b/docs/reference/images/msi_installer/msi_installer_upgrade_plugins.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_xpack.png b/docs/reference/images/msi_installer/msi_installer_xpack.png index 3f5f6d975940e..e457a578877bb 100644 Binary files a/docs/reference/images/msi_installer/msi_installer_xpack.png and b/docs/reference/images/msi_installer/msi_installer_xpack.png differ diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 54c0c1c1b157c..53de67e55fdf9 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -63,12 +63,6 @@ corruption is detected, it will prevent the shard from being opened. Accepts: Check for both physical and logical corruption. This is much more expensive in terms of CPU and memory usage. -`fix`:: - - Check for both physical and logical corruption. Segments that were reported - as corrupted will be automatically removed. This option *may result in data loss*. - Use with extreme caution! - WARNING: Expert only. Checking shards may take a lot of time on large indices. -- diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc index a1e00bac61649..c2b3d700e9b7c 100644 --- a/docs/reference/index-modules/store.asciidoc +++ b/docs/reference/index-modules/store.asciidoc @@ -67,6 +67,13 @@ process equal to the size of the file being mapped. Before using this class, be sure you have allowed plenty of <>. +[[allow-mmapfs]] +You can restrict the use of the `mmapfs` store type via the setting +`node.store.allow_mmapfs`. This is a boolean setting indicating whether or not +`mmapfs` is allowed. The default is to allow `mmapfs`. This setting is useful, +for example, if you are in an environment where you can not control the ability +to create a lot of memory maps so you need disable the ability to use `mmapfs`. + === Pre-loading data into the file system cache NOTE: This is an expert setting, the details of which may change in the future. 
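To connect the `allow_mmapfs` note above with the per-index store setting it guards, here is a minimal sketch (the index name is hypothetical): an index that explicitly requests the `mmapfs` store type, which a node started with `node.store.allow_mmapfs: false` would refuse to use.

[source,js]
--------------------------------------------------
PUT /my_index
{
  "settings": {
    "index.store.type": "mmapfs"
  }
}
--------------------------------------------------
// NOTCONSOLE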
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 989fe784790b4..c44c691febd36 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -63,7 +63,7 @@ include::sql/index.asciidoc[] include::monitoring/index.asciidoc[] -include::{xes-repo-dir}/rollup/index.asciidoc[] +include::rollup/index.asciidoc[] include::rest-api/index.asciidoc[] diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index 37d48eec2a215..8583afc96ab1f 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -101,8 +101,7 @@ which returns something similar to: "translog_generation" : "2", "max_seq_no" : "-1", "sync_id" : "AVvFY-071siAOuFGEO9P", <1> - "max_unsafe_auto_id_timestamp" : "-1", - "min_retained_seq_no": "0" + "max_unsafe_auto_id_timestamp" : "-1" }, "num_docs" : 0 } diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 37c616b234991..0241751a4df3b 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -1049,6 +1049,199 @@ understands this to mean `2016-04-01` as is explained in the <>, dissect also extracts structured fields out of a single text field +within a document. However, unlike the <>, dissect does not use +https://en.wikipedia.org/wiki/Regular_expression[Regular Expressions]. This allows dissect's syntax to be simple and, in +some cases, faster than the <>. + +Dissect matches a single text field against a defined pattern. + +For example, the following pattern: +[source,txt] +-------------------------------------------------- +%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{status} %{size} +-------------------------------------------------- +will match a log line of this format: +[source,txt] +-------------------------------------------------- +1.2.3.4 - - [30/Apr/1998:22:00:52 +0000] \"GET /english/venues/cities/images/montpellier/18.gif HTTP/1.0\" 200 3171 +-------------------------------------------------- +and result in a document with the following fields: +[source,js] +-------------------------------------------------- +"doc": { + "_index": "_index", + "_type": "_type", + "_id": "_id", + "_source": { + "request": "/english/venues/cities/images/montpellier/18.gif", + "auth": "-", + "ident": "-", + "verb": "GET", + "@timestamp": "30/Apr/1998:22:00:52 +0000", + "size": "3171", + "clientip": "1.2.3.4", + "httpversion": "1.0", + "status": "200" + } +} +-------------------------------------------------- +// NOTCONSOLE + +A dissect pattern is defined by the parts of the string that will be discarded. In the example above, the first part +to be discarded is a single space. Dissect finds this space, then assigns everything up +until that space to the value of `clientip`. +Later dissect matches the `[` and then `]` and then assigns `@timestamp` to everything in-between `[` and `]`. +Paying special attention to the parts of the string to discard will help build successful dissect patterns. + +Successful matches require all keys in a pattern to have a value. If any of the `%{keyname}` defined in the pattern do +not have a value, then an exception is thrown and may be handled by the <> directive. +An empty key `%{}` or a <> can be used to match values, but exclude the value from +the final document. All matched values are represented as string data types. The <> +may be used to convert to the expected data type.
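One convenient way to experiment with the dissect behaviour described above is the simulate pipeline API. This is only a sketch: it feeds the sample log line through a pipeline containing a single dissect processor, and the `message` field name is an assumption, not taken from the original text.

[source,js]
--------------------------------------------------
POST _ingest/pipeline/_simulate
{
  "pipeline": {
    "processors": [
      {
        "dissect": {
          "field": "message",
          "pattern": "%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{status} %{size}"
        }
      }
    ]
  },
  "docs": [
    {
      "_source": {
        "message": "1.2.3.4 - - [30/Apr/1998:22:00:52 +0000] \"GET /english/venues/cities/images/montpellier/18.gif HTTP/1.0\" 200 3171"
      }
    }
  ]
}
--------------------------------------------------
// NOTCONSOLE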
+ +Dissect also supports <> that can change dissect's default +behavior. For example, you can instruct dissect to ignore certain fields, append fields, skip over padding, etc. +See <> for more information. + +[[dissect-options]] +.Dissect Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to dissect +| `pattern` | yes | - | The pattern to apply to the field +| `append_separator`| no | "" (empty string) | The character(s) that separate the appended fields. +| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +| ` +|====== + +[source,js] +-------------------------------------------------- +{ + "dissect": { + "field": "message", + "pattern" : "%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{status} %{size}" + } +} +-------------------------------------------------- +// NOTCONSOLE +[[dissect-key-modifiers]] +==== Dissect key modifiers +Key modifiers can change the default behavior for dissection. Key modifiers may be found on the left or right +of the `%{keyname}`, always inside the `%{` and `}`. For example, `%{+keyname ->}` has the append and right padding +modifiers. + +.Dissect Key Modifiers +[options="header"] +|====== +| Modifier | Name | Position | Example | Description | Details +| `->` | Skip right padding | (far) right | `%{keyname1->}` | Skips any repeated characters to the right | <> +| `+` | Append | left | `%{+keyname} %{+keyname}` | Appends two or more fields together | <> +| `+` with `/n` | Append with order | left and right | `%{+keyname/2} %{+keyname/1}` | Appends two or more fields together in the order specified | <> +| `?` | Named skip key | left | `%{?ignoreme}` | Skips the matched value in the output. Same behavior as `%{}`| <> +| `*` and `&` | Reference keys | left | `%{*r1} %{&r1}` | Sets the output key as value of `*` and output value of `&` | <> +| ` +|====== + +[[dissect-modifier-skip-right-padding]] +===== Right padding modifier (`->`) + +The algorithm that performs the dissection is very strict in that it requires all characters in the pattern to match +the source string. For example, the pattern `%{fookey} %{barkey}` (1 space) will match the string "foo{nbsp}bar" +(1 space), but will not match the string "foo{nbsp}{nbsp}bar" (2 spaces) since the pattern has only 1 space and the +source string has 2 spaces. + +The right padding modifier helps with this case. After adding the right padding modifier, the pattern `%{fookey->} %{barkey}` +will now match "foo{nbsp}bar" (1 space) and "foo{nbsp}{nbsp}bar" (2 spaces) +and even "foo{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}bar" (10 spaces). + +Use the right padding modifier to allow for repetition of the characters after a `%{keyname->}`. + +The right padding modifier may be placed on any key with any other modifiers. It should always be the furthest right +modifier. For example: `%{+keyname/1->}` and `%{->}`. + +Right padding modifier example +|====== +| *Pattern* | `%{ts->} %{level}` +| *Input* | 1998-08-10T17:15:42,466{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}WARN +| *Result* a| +* ts = 1998-08-10T17:15:42,466 +* level = WARN +|====== + +The right padding modifier may be used with an empty key to help skip unwanted data. For example, the same input string, but wrapped with brackets, requires the use of an empty right padded key to achieve the same result.
+ +Right padding modifier with empty key example +|====== +| *Pattern* | `[%{ts}]%{->}[%{level}]` +| *Input* | [1998-08-10T17:15:42,466]{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}[WARN] +| *Result* a| +* ts = 1998-08-10T17:15:42,466 +* level = WARN +|====== + +===== Append modifier (`+`) +[[dissect-modifier-append-key]] +Dissect supports appending two or more results together for the output. +Values are appended left to right. An append separator can be specified. +In this example, the append_separator is defined as a space. + +Append modifier example +|====== +| *Pattern* | `%{+name} %{+name} %{+name} %{+name}` +| *Input* | john jacob jingleheimer schmidt +| *Result* a| +* name = john jacob jingleheimer schmidt +|====== + +===== Append with order modifier (`+` and `/n`) +[[dissect-modifier-append-key-with-order]] +Dissect supports appending two or more results together for the output. +Values are appended based on the order defined (`/n`). An append separator can be specified. +In this example, the append_separator is defined as a comma. (A runnable simulate sketch of the append modifiers follows the reference keys examples below.) + +Append with order modifier example +|====== +| *Pattern* | `%{+name/2} %{+name/4} %{+name/3} %{+name/1}` +| *Input* | john jacob jingleheimer schmidt +| *Result* a| +* name = schmidt,john,jingleheimer,jacob +|====== + +===== Named skip key (`?`) +[[dissect-modifier-named-skip-key]] +Dissect supports ignoring matches in the final result. This can be done with an empty key `%{}`, but for readability +it may be desired to give that empty key a name. + +Named skip key modifier example +|====== +| *Pattern* | `%{clientip} %{?ident} %{?auth} [%{@timestamp}]` +| *Input* | 1.2.3.4 - - [30/Apr/1998:22:00:52 +0000] +| *Result* a| +* clientip = 1.2.3.4 +* @timestamp = 30/Apr/1998:22:00:52 +0000 +|====== + +===== Reference keys (`*` and `&`) +[[dissect-modifier-reference-keys]] +Dissect supports using parsed values as the key/value pairings for the structured content. Imagine a system that +partially logs in key/value pairs. Reference keys allow you to maintain that key/value relationship. + +Reference key modifier example +|====== +| *Pattern* | `[%{ts}] [%{level}] %{*p1}:%{&p1} %{*p2}:%{&p2}` +| *Input* | [1998-08-10T17:15:42,466] [ERR] ip:1.2.3.4 error:REFUSED +| *Result* a| +* ts = 1998-08-10T17:15:42,466 +* level = ERR +* ip = 1.2.3.4 +* error = REFUSED +|====== + [[dot-expand-processor]] === Dot Expander Processor diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index bdb009167552a..4fbed66449800 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -38,10 +38,10 @@ Dynamic templates are specified as an array of named objects: <3> The mapping that the matched field should use. -Templates are processed in order -- the first matching template wins. New -templates can be appended to the end of the list with the -<> API. If a new template has the same -name as an existing template, it will replace the old version. +Templates are processed in order -- the first matching template wins. When +putting new dynamic templates through the <> API, +all existing templates are overwritten. This allows for dynamic templates to be +reordered or deleted after they were initially added.
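Here is the simulate sketch promised above for the append modifiers. The two-token `message` and the field name are made up; with `append_separator` set to a space, the ordered keys should produce `name = john jacob`:

[source,js]
--------------------------------------------------
POST _ingest/pipeline/_simulate
{
  "pipeline": {
    "processors": [
      {
        "dissect": {
          "field": "message",
          "pattern": "%{+name/2} %{+name/1}",
          "append_separator": " "
        }
      }
    ]
  },
  "docs": [
    { "_source": { "message": "jacob john" } }
  ]
}
--------------------------------------------------
// NOTCONSOLE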
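To make the dynamic template overwrite semantics above concrete, a hedged sketch (hypothetical index and template name, 6.x-style `_doc` type): each put-mapping call like the one below supplies the complete `dynamic_templates` array, and whatever templates were registered before are replaced wholesale.

[source,js]
--------------------------------------------------
PUT my_index/_mapping/_doc
{
  "dynamic_templates": [
    {
      "strings_as_keywords": {
        "match_mapping_type": "string",
        "mapping": { "type": "keyword" }
      }
    }
  ]
}
--------------------------------------------------
// NOTCONSOLE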
[[match-mapping-type]] ==== `match_mapping_type` diff --git a/docs/reference/mapping/params/ignore-above.asciidoc b/docs/reference/mapping/params/ignore-above.asciidoc index 95704c6c8bbe3..fe7c6881a064f 100644 --- a/docs/reference/mapping/params/ignore-above.asciidoc +++ b/docs/reference/mapping/params/ignore-above.asciidoc @@ -2,6 +2,9 @@ === `ignore_above` Strings longer than the `ignore_above` setting will not be indexed or stored. +For arrays of strings, `ignore_above` will be applied for each array element separately and string elements longer than `ignore_above` will not be indexed or stored. + +NOTE: All strings/array elements will still be present in the `_source` field, if the latter is enabled which is the default in Elasticsearch. [source,js] -------------------------------------------------- diff --git a/docs/reference/mapping/params/normalizer.asciidoc b/docs/reference/mapping/params/normalizer.asciidoc index 3688a0e945414..73110cd11f5af 100644 --- a/docs/reference/mapping/params/normalizer.asciidoc +++ b/docs/reference/mapping/params/normalizer.asciidoc @@ -151,7 +151,7 @@ returns }, "hits": { "total": 3, - "max_score": 0.0, + "max_score": null, "hits": [] }, "aggregations": { diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc index e2336bd5cb066..db64e87412e03 100644 --- a/docs/reference/mapping/types/text.asciidoc +++ b/docs/reference/mapping/types/text.asciidoc @@ -99,7 +99,7 @@ The following parameters are accepted by `text` fields: `index_phrases`:: If enabled, two-term word combinations ('shingles') are indexed into a separate - field. This allows exact phrase queries to run more efficiently, at the expense + field. This allows exact phrase queries (no slop) to run more efficiently, at the expense of a larger index. Note that this works best when stopwords are not removed, as phrases containing stopwords will not use the subsidiary field and will fall back to a standard phrase query. Accepts `true` or `false` (default). @@ -171,4 +171,4 @@ PUT my_index -------------------------------- // CONSOLE <1> `min_chars` must be greater than zero, defaults to 2 -<2> `max_chars` must be greater than or equal to `min_chars` and less than 20, defaults to 5 \ No newline at end of file +<2> `max_chars` must be greater than or equal to `min_chars` and less than 20, defaults to 5 diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc index 42fd6b7afbe73..924a6984dc044 100644 --- a/docs/reference/migration/migrate_7_0.asciidoc +++ b/docs/reference/migration/migrate_7_0.asciidoc @@ -39,6 +39,7 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x. * <> * <> * <> +* <> include::migrate_7_0/aggregations.asciidoc[] include::migrate_7_0/analysis.asciidoc[] @@ -53,4 +54,5 @@ include::migrate_7_0/java.asciidoc[] include::migrate_7_0/settings.asciidoc[] include::migrate_7_0/scripting.asciidoc[] include::migrate_7_0/snapshotstats.asciidoc[] -include::migrate_7_0/restclient.asciidoc[] \ No newline at end of file +include::migrate_7_0/restclient.asciidoc[] +include::migrate_7_0/low_level_restclient.asciidoc[] diff --git a/docs/reference/migration/migrate_7_0/aggregations.asciidoc b/docs/reference/migration/migrate_7_0/aggregations.asciidoc index b4f29935be9ab..08f181b2919e2 100644 --- a/docs/reference/migration/migrate_7_0/aggregations.asciidoc +++ b/docs/reference/migration/migrate_7_0/aggregations.asciidoc @@ -21,5 +21,3 @@ has been removed. `missing_bucket` should be used instead. 
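Referring back to the `ignore_above` note earlier in this diff, a minimal sketch of the per-element behaviour (index, field, and limit are made up): the second tag below exceeds the limit, so it is neither indexed nor stored, yet both tags remain in `_source`.

[source,js]
--------------------------------------------------
PUT my_index
{
  "mappings": {
    "_doc": {
      "properties": {
        "tags": { "type": "keyword", "ignore_above": 10 }
      }
    }
  }
}

PUT my_index/_doc/1
{
  "tags": ["short", "a tag that is well over ten characters"]
}
--------------------------------------------------
// NOTCONSOLE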
The object used to share aggregation state between the scripts in a Scripted Metric Aggregation is now a variable called `state` available in the script context, rather than being provided via the `params` object as `params._agg`. - -The old `params._agg` variable is still available as well. diff --git a/docs/reference/migration/migrate_7_0/indices.asciidoc b/docs/reference/migration/migrate_7_0/indices.asciidoc index bab7b60222013..a47cc6f4324a2 100644 --- a/docs/reference/migration/migrate_7_0/indices.asciidoc +++ b/docs/reference/migration/migrate_7_0/indices.asciidoc @@ -78,3 +78,7 @@ The parent circuit breaker defines a new setting `indices.breaker.total.use_real heap memory instead of only considering the reserved memory by child circuit breakers. When this setting is `true`, the default parent breaker limit also changes from 70% to 95% of the JVM heap size. The previous behavior can be restored by setting `indices.breaker.total.use_real_memory` to `false`. + +==== `fix` value for `index.shard.check_on_startup` is removed + +Deprecated option value `fix` for setting `index.shard.check_on_startup` is not supported. \ No newline at end of file diff --git a/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc b/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc new file mode 100644 index 0000000000000..77f5266763ffe --- /dev/null +++ b/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc @@ -0,0 +1,14 @@ +[[breaking_70_low_level_restclient_changes]] +=== Low-level REST client changes + +==== Deprecated flavors of performRequest have been removed + +We deprecated the flavors of `performRequest` and `performRequestAsync` that +do not take `Request` objects in 6.4.0 in favor of the flavors that take +`Request` objects because those methods can be extended without breaking +backwards compatibility. + +==== Removed setHosts + +We deprecated `setHosts` in 6.4.0 in favor of `setNodes` because it supports +host metadata used by the `NodeSelector`. diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc index 11f4650912723..a7d32896e9723 100644 --- a/docs/reference/migration/migrate_7_0/search.asciidoc +++ b/docs/reference/migration/migrate_7_0/search.asciidoc @@ -54,6 +54,13 @@ Setting `request_cache:true` on a query that creates a scroll (`scroll=1m`) has been deprecated in 6 and will now return a `400 - Bad request`. Scroll queries are not meant to be cached. +==== Scroll queries cannot use `rescore` anymore +Including a rescore clause on a query that creates a scroll (`scroll=1m`) has +been deprecated in 6.5 and will now return a `400 - Bad request`. Allowing +rescore on scroll queries would break the scroll sort. In the 6.x line, the +rescore clause was silently ignored (for scroll queries), and it was allowed in +the 5.x line. + ==== Term Suggesters supported distance algorithms The following string distance algorithms were given additional names in 6.2 and @@ -92,8 +99,16 @@ deprecated in 6.x, has been removed. Context enabled suggestion queries without contexts have to visit every suggestion, which degrades the search performance considerably. +For geo context the value of the `path` parameter is now validated against the mapping, +and the context is only accepted if `path` points to a field with `geo_point` type. 
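For the Scripted Metric Aggregation change noted at the start of this migration section, a hedged sketch of the new `state` variable (the index and field names are hypothetical, and the scripts are illustrative only):

[source,js]
--------------------------------------------------
GET my_index/_search
{
  "size": 0,
  "aggs": {
    "total_amount": {
      "scripted_metric": {
        "init_script": "state.sum = 0",
        "map_script": "state.sum += doc['amount'].value",
        "combine_script": "return state.sum",
        "reduce_script": "double total = 0; for (s in states) { total += s } return total"
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE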
+ ==== Semantics changed for `max_concurrent_shard_requests` `max_concurrent_shard_requests` used to limit the total number of concurrent shard requests a single high level search request can execute. In 7.0 this changed to be the max number of concurrent shard requests per node. The default is now `5`. + +==== `max_score` set to `null` when scores are not tracked + +`max_score` used to be set to `0` whenever scores are not tracked. `null` is now used +instead which is a more appropriate value for a scenario where scores are not available. diff --git a/x-pack/docs/en/ml/aggregations.asciidoc b/docs/reference/ml/aggregations.asciidoc similarity index 99% rename from x-pack/docs/en/ml/aggregations.asciidoc rename to docs/reference/ml/aggregations.asciidoc index 07f465015696d..4b873ea790b1e 100644 --- a/x-pack/docs/en/ml/aggregations.asciidoc +++ b/docs/reference/ml/aggregations.asciidoc @@ -41,7 +41,7 @@ PUT _xpack/ml/anomaly_detectors/farequote } ---------------------------------- // CONSOLE -// TEST[setup:farequote_data] +// TEST[skip:setup:farequote_data] In this example, the `airline`, `responsetime`, and `time` fields are aggregations. @@ -90,7 +90,7 @@ PUT _xpack/ml/datafeeds/datafeed-farequote } ---------------------------------- // CONSOLE -// TEST[setup:farequote_job] +// TEST[skip:setup:farequote_job] In this example, the aggregations have names that match the fields that they operate on. That is to say, the `max` aggregation is named `time` and its diff --git a/x-pack/docs/en/rest-api/ml/calendarresource.asciidoc b/docs/reference/ml/apis/calendarresource.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/calendarresource.asciidoc rename to docs/reference/ml/apis/calendarresource.asciidoc index 8edb43ed7a393..4279102cd35fc 100644 --- a/x-pack/docs/en/rest-api/ml/calendarresource.asciidoc +++ b/docs/reference/ml/apis/calendarresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-calendar-resource]] === Calendar Resources diff --git a/x-pack/docs/en/rest-api/ml/close-job.asciidoc b/docs/reference/ml/apis/close-job.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/close-job.asciidoc rename to docs/reference/ml/apis/close-job.asciidoc index 8e7e8eb0ce850..6dec6402c8766 100644 --- a/x-pack/docs/en/rest-api/ml/close-job.asciidoc +++ b/docs/reference/ml/apis/close-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-close-job]] === Close Jobs API ++++ @@ -80,7 +81,7 @@ The following example closes the `total-requests` job: POST _xpack/ml/anomaly_detectors/total-requests/_close -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] When the job is closed, you receive the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/datafeedresource.asciidoc b/docs/reference/ml/apis/datafeedresource.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/datafeedresource.asciidoc rename to docs/reference/ml/apis/datafeedresource.asciidoc index 0ffeb6bc89d72..6fe0b35d95185 100644 --- a/x-pack/docs/en/rest-api/ml/datafeedresource.asciidoc +++ b/docs/reference/ml/apis/datafeedresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-datafeed-resource]] === {dfeed-cap} Resources diff --git a/x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc b/docs/reference/ml/apis/delete-calendar-event.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc 
rename to docs/reference/ml/apis/delete-calendar-event.asciidoc index ef8dad39dba70..8961726f57322 100644 --- a/x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-event.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-calendar-event]] === Delete Events from Calendar API ++++ @@ -44,7 +45,7 @@ calendar: DELETE _xpack/ml/calendars/planned-outages/events/LS8LJGEBMTCMA-qz49st -------------------------------------------------- // CONSOLE -// TEST[catch:missing] +// TEST[skip:catch:missing] When the event is removed, you receive the following results: [source,js] @@ -53,4 +54,3 @@ When the event is removed, you receive the following results: "acknowledged": true } ---- -// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/delete-calendar-job.asciidoc b/docs/reference/ml/apis/delete-calendar-job.asciidoc similarity index 93% rename from x-pack/docs/en/rest-api/ml/delete-calendar-job.asciidoc rename to docs/reference/ml/apis/delete-calendar-job.asciidoc index 94388c0c4b680..4362a82b5cb7e 100644 --- a/x-pack/docs/en/rest-api/ml/delete-calendar-job.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-calendar-job]] === Delete Jobs from Calendar API ++++ @@ -38,7 +39,7 @@ calendar and `total-requests` job: DELETE _xpack/ml/calendars/planned-outages/jobs/total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_addjob] +// TEST[skip:setup:calendar_outages_addjob] When the job is removed from the calendar, you receive the following results: @@ -50,4 +51,4 @@ results: "job_ids": [] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/delete-calendar.asciidoc b/docs/reference/ml/apis/delete-calendar.asciidoc similarity index 92% rename from x-pack/docs/en/rest-api/ml/delete-calendar.asciidoc rename to docs/reference/ml/apis/delete-calendar.asciidoc index f7673b545748b..9f9f3457f24d2 100644 --- a/x-pack/docs/en/rest-api/ml/delete-calendar.asciidoc +++ b/docs/reference/ml/apis/delete-calendar.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-calendar]] === Delete Calendar API ++++ @@ -40,7 +41,7 @@ The following example deletes the `planned-outages` calendar: DELETE _xpack/ml/calendars/planned-outages -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages] +// TEST[skip:setup:calendar_outages] When the calendar is deleted, you receive the following results: [source,js] @@ -49,4 +50,4 @@ When the calendar is deleted, you receive the following results: "acknowledged": true } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/delete-datafeed.asciidoc b/docs/reference/ml/apis/delete-datafeed.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/delete-datafeed.asciidoc rename to docs/reference/ml/apis/delete-datafeed.asciidoc index db4fd5c177aed..996d2c7dd2eaf 100644 --- a/x-pack/docs/en/rest-api/ml/delete-datafeed.asciidoc +++ b/docs/reference/ml/apis/delete-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-datafeed]] === Delete {dfeeds-cap} API ++++ @@ -47,7 +48,7 @@ The following example deletes the `datafeed-total-requests` {dfeed}: DELETE _xpack/ml/datafeeds/datafeed-total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_datafeed] +// 
TEST[skip:setup:server_metrics_datafeed] When the {dfeed} is deleted, you receive the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/delete-filter.asciidoc b/docs/reference/ml/apis/delete-filter.asciidoc similarity index 92% rename from x-pack/docs/en/rest-api/ml/delete-filter.asciidoc rename to docs/reference/ml/apis/delete-filter.asciidoc index b58d2980b888a..21e35b66076f6 100644 --- a/x-pack/docs/en/rest-api/ml/delete-filter.asciidoc +++ b/docs/reference/ml/apis/delete-filter.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-filter]] === Delete Filter API ++++ @@ -41,7 +42,7 @@ The following example deletes the `safe_domains` filter: DELETE _xpack/ml/filters/safe_domains -------------------------------------------------- // CONSOLE -// TEST[setup:ml_filter_safe_domains] +// TEST[skip:setup:ml_filter_safe_domains] When the filter is deleted, you receive the following results: [source,js] @@ -50,4 +51,4 @@ When the filter is deleted, you receive the following results: "acknowledged": true } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/delete-job.asciidoc b/docs/reference/ml/apis/delete-job.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/delete-job.asciidoc rename to docs/reference/ml/apis/delete-job.asciidoc index c01b08545b638..d5ef120ad040b 100644 --- a/x-pack/docs/en/rest-api/ml/delete-job.asciidoc +++ b/docs/reference/ml/apis/delete-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-job]] === Delete Jobs API ++++ @@ -56,7 +57,7 @@ The following example deletes the `total-requests` job: DELETE _xpack/ml/anomaly_detectors/total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] When the job is deleted, you receive the following results: [source,js] @@ -65,4 +66,4 @@ When the job is deleted, you receive the following results: "acknowledged": true } ---- -// TESTRESPONSE +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/delete-snapshot.asciidoc b/docs/reference/ml/apis/delete-snapshot.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/delete-snapshot.asciidoc rename to docs/reference/ml/apis/delete-snapshot.asciidoc index 2ab0116fe74d9..96a3590054557 100644 --- a/x-pack/docs/en/rest-api/ml/delete-snapshot.asciidoc +++ b/docs/reference/ml/apis/delete-snapshot.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-snapshot]] === Delete Model Snapshots API ++++ @@ -32,7 +33,6 @@ the `model_snapshot_id` in the results from the get jobs API. You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. 
==== Examples @@ -53,3 +53,4 @@ When the snapshot is deleted, you receive the following results: "acknowledged": true } ---- +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/eventresource.asciidoc b/docs/reference/ml/apis/eventresource.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/eventresource.asciidoc rename to docs/reference/ml/apis/eventresource.asciidoc index c9ab78964213e..a1e96f5c25a0a 100644 --- a/x-pack/docs/en/rest-api/ml/eventresource.asciidoc +++ b/docs/reference/ml/apis/eventresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-event-resource]] === Scheduled Event Resources diff --git a/x-pack/docs/en/rest-api/ml/filterresource.asciidoc b/docs/reference/ml/apis/filterresource.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/filterresource.asciidoc rename to docs/reference/ml/apis/filterresource.asciidoc index e942447c1ee60..e67c92dc8d096 100644 --- a/x-pack/docs/en/rest-api/ml/filterresource.asciidoc +++ b/docs/reference/ml/apis/filterresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-filter-resource]] === Filter Resources diff --git a/x-pack/docs/en/rest-api/ml/flush-job.asciidoc b/docs/reference/ml/apis/flush-job.asciidoc similarity index 92% rename from x-pack/docs/en/rest-api/ml/flush-job.asciidoc rename to docs/reference/ml/apis/flush-job.asciidoc index 934a2d81b1778..f19d2aa648f68 100644 --- a/x-pack/docs/en/rest-api/ml/flush-job.asciidoc +++ b/docs/reference/ml/apis/flush-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-flush-job]] === Flush Jobs API ++++ @@ -74,7 +75,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_flush } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] When the operation succeeds, you receive the following results: [source,js] @@ -84,7 +85,7 @@ When the operation succeeds, you receive the following results: "last_finalized_bucket_end": 1455234900000 } ---- -// TESTRESPONSE[s/"last_finalized_bucket_end": 1455234900000/"last_finalized_bucket_end": $body.last_finalized_bucket_end/] +//TESTRESPONSE[s/"last_finalized_bucket_end": 1455234900000/"last_finalized_bucket_end": $body.last_finalized_bucket_end/] The `last_finalized_bucket_end` provides the timestamp (in milliseconds-since-the-epoch) of the end of the last bucket that was processed. 
@@ -101,7 +102,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_flush } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] When the operation succeeds, you receive the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/forecast.asciidoc b/docs/reference/ml/apis/forecast.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/forecast.asciidoc rename to docs/reference/ml/apis/forecast.asciidoc index 99647ecae1b25..197876f3f04a7 100644 --- a/x-pack/docs/en/rest-api/ml/forecast.asciidoc +++ b/docs/reference/ml/apis/forecast.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-forecast]] === Forecast Jobs API ++++ diff --git a/x-pack/docs/en/rest-api/ml/get-bucket.asciidoc b/docs/reference/ml/apis/get-bucket.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/get-bucket.asciidoc rename to docs/reference/ml/apis/get-bucket.asciidoc index 95b05ff7f5dd2..3a276c13e895b 100644 --- a/x-pack/docs/en/rest-api/ml/get-bucket.asciidoc +++ b/docs/reference/ml/apis/get-bucket.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-bucket]] === Get Buckets API ++++ @@ -81,7 +82,6 @@ that stores the results. The `machine_learning_admin` and `machine_learning_user roles provide these privileges. For more information, see {xpack-ref}/security-privileges.html[Security Privileges] and {xpack-ref}/built-in-roles.html[Built-in Roles]. -//<> and <>. ==== Examples diff --git a/x-pack/docs/en/rest-api/ml/get-calendar-event.asciidoc b/docs/reference/ml/apis/get-calendar-event.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/get-calendar-event.asciidoc rename to docs/reference/ml/apis/get-calendar-event.asciidoc index e89173c3382d9..43dd74e47c977 100644 --- a/x-pack/docs/en/rest-api/ml/get-calendar-event.asciidoc +++ b/docs/reference/ml/apis/get-calendar-event.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-calendar-event]] === Get Scheduled Events API ++++ @@ -66,7 +67,7 @@ The following example gets information about the scheduled events in the GET _xpack/ml/calendars/planned-outages/events -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_addevent] +// TEST[skip:setup:calendar_outages_addevent] The API returns the following results: diff --git a/x-pack/docs/en/rest-api/ml/get-calendar.asciidoc b/docs/reference/ml/apis/get-calendar.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/get-calendar.asciidoc rename to docs/reference/ml/apis/get-calendar.asciidoc index ae95fd9968893..f86875f326cd9 100644 --- a/x-pack/docs/en/rest-api/ml/get-calendar.asciidoc +++ b/docs/reference/ml/apis/get-calendar.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-calendar]] === Get Calendars API ++++ @@ -62,7 +63,7 @@ calendar: GET _xpack/ml/calendars/planned-outages -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_addjob] +// TEST[skip:setup:calendar_outages_addjob] The API returns the following results: [source,js] @@ -79,4 +80,4 @@ The API returns the following results: ] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/get-category.asciidoc b/docs/reference/ml/apis/get-category.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/get-category.asciidoc rename to docs/reference/ml/apis/get-category.asciidoc index 13f274133c0d1..e5d6fe16802a1 
100644 --- a/x-pack/docs/en/rest-api/ml/get-category.asciidoc +++ b/docs/reference/ml/apis/get-category.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-category]] === Get Categories API ++++ @@ -18,7 +19,6 @@ Retrieves job results for one or more categories. For more information about categories, see {xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. -//<>. ==== Path Parameters @@ -56,7 +56,6 @@ that stores the results. The `machine_learning_admin` and `machine_learning_user roles provide these privileges. For more information, see {xpack-ref}/security-privileges.html[Security Privileges] and {xpack-ref}/built-in-roles.html[Built-in Roles]. -//<> and <>. ==== Examples diff --git a/x-pack/docs/en/rest-api/ml/get-datafeed-stats.asciidoc b/docs/reference/ml/apis/get-datafeed-stats.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/get-datafeed-stats.asciidoc rename to docs/reference/ml/apis/get-datafeed-stats.asciidoc index 2869e8222f86f..9ca67cc17fb44 100644 --- a/x-pack/docs/en/rest-api/ml/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/apis/get-datafeed-stats.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-datafeed-stats]] === Get {dfeed-cap} Statistics API ++++ @@ -66,7 +67,7 @@ The following example gets usage information for the GET _xpack/ml/datafeeds/datafeed-total-requests/_stats -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_startdf] +// TEST[skip:setup:server_metrics_startdf] The API returns the following results: [source,js] @@ -97,4 +98,4 @@ The API returns the following results: // TESTRESPONSE[s/"node-0"/$body.$_path/] // TESTRESPONSE[s/"hoXMLZB0RWKfR9UPPUCxXX"/$body.$_path/] // TESTRESPONSE[s/"127.0.0.1:9300"/$body.$_path/] -// TESTRESPONSE[s/"17179869184"/$body.datafeeds.0.node.attributes.ml\\.machine_memory/] +// TESTRESPONSE[s/"17179869184"/$body.datafeeds.0.node.attributes.ml\\.machine_memory/] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/get-datafeed.asciidoc b/docs/reference/ml/apis/get-datafeed.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/get-datafeed.asciidoc rename to docs/reference/ml/apis/get-datafeed.asciidoc index 0fa51773fd162..db5f4249669bb 100644 --- a/x-pack/docs/en/rest-api/ml/get-datafeed.asciidoc +++ b/docs/reference/ml/apis/get-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-datafeed]] === Get {dfeeds-cap} API ++++ @@ -60,7 +61,7 @@ The following example gets configuration information for the GET _xpack/ml/datafeeds/datafeed-total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_datafeed] +// TEST[skip:setup:server_metrics_datafeed] The API returns the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/get-filter.asciidoc b/docs/reference/ml/apis/get-filter.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/get-filter.asciidoc rename to docs/reference/ml/apis/get-filter.asciidoc index b4699e9d622cf..2dbb5d16cc5a4 100644 --- a/x-pack/docs/en/rest-api/ml/get-filter.asciidoc +++ b/docs/reference/ml/apis/get-filter.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-filter]] === Get Filters API ++++ @@ -62,7 +63,7 @@ filter: GET _xpack/ml/filters/safe_domains -------------------------------------------------- // CONSOLE -// TEST[setup:ml_filter_safe_domains] +// TEST[skip:setup:ml_filter_safe_domains] The API returns the following results: 
[source,js] @@ -81,4 +82,4 @@ The API returns the following results: ] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/get-influencer.asciidoc b/docs/reference/ml/apis/get-influencer.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-influencer.asciidoc rename to docs/reference/ml/apis/get-influencer.asciidoc index bffd2b8e09633..182cca7aa9917 100644 --- a/x-pack/docs/en/rest-api/ml/get-influencer.asciidoc +++ b/docs/reference/ml/apis/get-influencer.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-influencer]] === Get Influencers API ++++ diff --git a/x-pack/docs/en/rest-api/ml/get-job-stats.asciidoc b/docs/reference/ml/apis/get-job-stats.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-job-stats.asciidoc rename to docs/reference/ml/apis/get-job-stats.asciidoc index bd59ee8b258fa..509d9448a693e 100644 --- a/x-pack/docs/en/rest-api/ml/get-job-stats.asciidoc +++ b/docs/reference/ml/apis/get-job-stats.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-job-stats]] === Get Job Statistics API ++++ diff --git a/x-pack/docs/en/rest-api/ml/get-job.asciidoc b/docs/reference/ml/apis/get-job.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/get-job.asciidoc rename to docs/reference/ml/apis/get-job.asciidoc index 2e95d8e01bbb7..c669ac6034eda 100644 --- a/x-pack/docs/en/rest-api/ml/get-job.asciidoc +++ b/docs/reference/ml/apis/get-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-job]] === Get Jobs API ++++ @@ -59,7 +60,7 @@ The following example gets configuration information for the `total-requests` jo GET _xpack/ml/anomaly_detectors/total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] The API returns the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/get-overall-buckets.asciidoc b/docs/reference/ml/apis/get-overall-buckets.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-overall-buckets.asciidoc rename to docs/reference/ml/apis/get-overall-buckets.asciidoc index f2581f4904e37..f4818f3bbbe44 100644 --- a/x-pack/docs/en/rest-api/ml/get-overall-buckets.asciidoc +++ b/docs/reference/ml/apis/get-overall-buckets.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-overall-buckets]] === Get Overall Buckets API ++++ @@ -93,7 +94,6 @@ that stores the results. The `machine_learning_admin` and `machine_learning_user roles provide these privileges. For more information, see {xpack-ref}/security-privileges.html[Security Privileges] and {xpack-ref}/built-in-roles.html[Built-in Roles]. -//<> and <>. 
==== Examples diff --git a/x-pack/docs/en/rest-api/ml/get-record.asciidoc b/docs/reference/ml/apis/get-record.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-record.asciidoc rename to docs/reference/ml/apis/get-record.asciidoc index 1870b44159760..199cce1548427 100644 --- a/x-pack/docs/en/rest-api/ml/get-record.asciidoc +++ b/docs/reference/ml/apis/get-record.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-record]] === Get Records API ++++ diff --git a/x-pack/docs/en/rest-api/ml/get-snapshot.asciidoc b/docs/reference/ml/apis/get-snapshot.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-snapshot.asciidoc rename to docs/reference/ml/apis/get-snapshot.asciidoc index 24e82af1f199f..e194d944b636c 100644 --- a/x-pack/docs/en/rest-api/ml/get-snapshot.asciidoc +++ b/docs/reference/ml/apis/get-snapshot.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-snapshot]] === Get Model Snapshots API ++++ diff --git a/x-pack/docs/en/rest-api/ml/jobcounts.asciidoc b/docs/reference/ml/apis/jobcounts.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/jobcounts.asciidoc rename to docs/reference/ml/apis/jobcounts.asciidoc index d343cc23ae0ad..d0169e228d549 100644 --- a/x-pack/docs/en/rest-api/ml/jobcounts.asciidoc +++ b/docs/reference/ml/apis/jobcounts.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-jobstats]] === Job Statistics diff --git a/x-pack/docs/en/rest-api/ml/jobresource.asciidoc b/docs/reference/ml/apis/jobresource.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/jobresource.asciidoc rename to docs/reference/ml/apis/jobresource.asciidoc index 5b109b1c21d78..e0c314724e762 100644 --- a/x-pack/docs/en/rest-api/ml/jobresource.asciidoc +++ b/docs/reference/ml/apis/jobresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-job-resource]] === Job Resources diff --git a/x-pack/docs/en/rest-api/ml-api.asciidoc b/docs/reference/ml/apis/ml-api.asciidoc similarity index 61% rename from x-pack/docs/en/rest-api/ml-api.asciidoc rename to docs/reference/ml/apis/ml-api.asciidoc index b48e9f934042d..b8509f221524c 100644 --- a/x-pack/docs/en/rest-api/ml-api.asciidoc +++ b/docs/reference/ml/apis/ml-api.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-apis]] == Machine Learning APIs @@ -70,57 +71,57 @@ machine learning APIs and in advanced job configuration options in Kibana. 
* <> //ADD -include::ml/post-calendar-event.asciidoc[] -include::ml/put-calendar-job.asciidoc[] +include::post-calendar-event.asciidoc[] +include::put-calendar-job.asciidoc[] //CLOSE -include::ml/close-job.asciidoc[] +include::close-job.asciidoc[] //CREATE -include::ml/put-calendar.asciidoc[] -include::ml/put-datafeed.asciidoc[] -include::ml/put-filter.asciidoc[] -include::ml/put-job.asciidoc[] +include::put-calendar.asciidoc[] +include::put-datafeed.asciidoc[] +include::put-filter.asciidoc[] +include::put-job.asciidoc[] //DELETE -include::ml/delete-calendar.asciidoc[] -include::ml/delete-datafeed.asciidoc[] -include::ml/delete-calendar-event.asciidoc[] -include::ml/delete-filter.asciidoc[] -include::ml/delete-job.asciidoc[] -include::ml/delete-calendar-job.asciidoc[] -include::ml/delete-snapshot.asciidoc[] +include::delete-calendar.asciidoc[] +include::delete-datafeed.asciidoc[] +include::delete-calendar-event.asciidoc[] +include::delete-filter.asciidoc[] +include::delete-job.asciidoc[] +include::delete-calendar-job.asciidoc[] +include::delete-snapshot.asciidoc[] //FLUSH -include::ml/flush-job.asciidoc[] +include::flush-job.asciidoc[] //FORECAST -include::ml/forecast.asciidoc[] +include::forecast.asciidoc[] //GET -include::ml/get-calendar.asciidoc[] -include::ml/get-bucket.asciidoc[] -include::ml/get-overall-buckets.asciidoc[] -include::ml/get-category.asciidoc[] -include::ml/get-datafeed.asciidoc[] -include::ml/get-datafeed-stats.asciidoc[] -include::ml/get-influencer.asciidoc[] -include::ml/get-job.asciidoc[] -include::ml/get-job-stats.asciidoc[] -include::ml/get-snapshot.asciidoc[] -include::ml/get-calendar-event.asciidoc[] -include::ml/get-filter.asciidoc[] -include::ml/get-record.asciidoc[] +include::get-calendar.asciidoc[] +include::get-bucket.asciidoc[] +include::get-overall-buckets.asciidoc[] +include::get-category.asciidoc[] +include::get-datafeed.asciidoc[] +include::get-datafeed-stats.asciidoc[] +include::get-influencer.asciidoc[] +include::get-job.asciidoc[] +include::get-job-stats.asciidoc[] +include::get-snapshot.asciidoc[] +include::get-calendar-event.asciidoc[] +include::get-filter.asciidoc[] +include::get-record.asciidoc[] //OPEN -include::ml/open-job.asciidoc[] +include::open-job.asciidoc[] //POST -include::ml/post-data.asciidoc[] +include::post-data.asciidoc[] //PREVIEW -include::ml/preview-datafeed.asciidoc[] +include::preview-datafeed.asciidoc[] //REVERT -include::ml/revert-snapshot.asciidoc[] +include::revert-snapshot.asciidoc[] //START/STOP -include::ml/start-datafeed.asciidoc[] -include::ml/stop-datafeed.asciidoc[] +include::start-datafeed.asciidoc[] +include::stop-datafeed.asciidoc[] //UPDATE -include::ml/update-datafeed.asciidoc[] -include::ml/update-filter.asciidoc[] -include::ml/update-job.asciidoc[] -include::ml/update-snapshot.asciidoc[] +include::update-datafeed.asciidoc[] +include::update-filter.asciidoc[] +include::update-job.asciidoc[] +include::update-snapshot.asciidoc[] //VALIDATE -//include::ml/validate-detector.asciidoc[] -//include::ml/validate-job.asciidoc[] +//include::validate-detector.asciidoc[] +//include::validate-job.asciidoc[] diff --git a/x-pack/docs/en/rest-api/ml/open-job.asciidoc b/docs/reference/ml/apis/open-job.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/open-job.asciidoc rename to docs/reference/ml/apis/open-job.asciidoc index 59d5568ac775a..c1e5977b734fd 100644 --- a/x-pack/docs/en/rest-api/ml/open-job.asciidoc +++ b/docs/reference/ml/apis/open-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] 
[[ml-open-job]] === Open Jobs API ++++ @@ -56,7 +57,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_open } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] When the job opens, you receive the following results: [source,js] @@ -65,5 +66,4 @@ When the job opens, you receive the following results: "opened": true } ---- -//CONSOLE // TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/post-calendar-event.asciidoc b/docs/reference/ml/apis/post-calendar-event.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/post-calendar-event.asciidoc rename to docs/reference/ml/apis/post-calendar-event.asciidoc index 41af0841d2e83..998db409fc7d6 100644 --- a/x-pack/docs/en/rest-api/ml/post-calendar-event.asciidoc +++ b/docs/reference/ml/apis/post-calendar-event.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-post-calendar-event]] === Add Events to Calendar API ++++ @@ -52,7 +53,7 @@ POST _xpack/ml/calendars/planned-outages/events } -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_addjob] +// TEST[skip:setup:calendar_outages_addjob] The API returns the following results: @@ -81,7 +82,7 @@ The API returns the following results: ] } ---- -//TESTRESPONSE +// TESTRESPONSE For more information about these properties, see <>. diff --git a/x-pack/docs/en/rest-api/ml/post-data.asciidoc b/docs/reference/ml/apis/post-data.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/post-data.asciidoc rename to docs/reference/ml/apis/post-data.asciidoc index 40354d7f6f760..6a5a3d3d6cb5e 100644 --- a/x-pack/docs/en/rest-api/ml/post-data.asciidoc +++ b/docs/reference/ml/apis/post-data.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-post-data]] === Post Data to Jobs API ++++ diff --git a/x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc b/docs/reference/ml/apis/preview-datafeed.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc rename to docs/reference/ml/apis/preview-datafeed.asciidoc index 637b506cb9af7..7b9eccd9a5920 100644 --- a/x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc +++ b/docs/reference/ml/apis/preview-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-preview-datafeed]] === Preview {dfeeds-cap} API ++++ @@ -53,7 +54,7 @@ The following example obtains a preview of the `datafeed-farequote` {dfeed}: GET _xpack/ml/datafeeds/datafeed-farequote/_preview -------------------------------------------------- // CONSOLE -// TEST[setup:farequote_datafeed] +// TEST[skip:setup:farequote_datafeed] The data that is returned for this example is as follows: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/put-calendar-job.asciidoc b/docs/reference/ml/apis/put-calendar-job.asciidoc similarity index 93% rename from x-pack/docs/en/rest-api/ml/put-calendar-job.asciidoc rename to docs/reference/ml/apis/put-calendar-job.asciidoc index 6940957b15926..0563047043ae4 100644 --- a/x-pack/docs/en/rest-api/ml/put-calendar-job.asciidoc +++ b/docs/reference/ml/apis/put-calendar-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-calendar-job]] === Add Jobs to Calendar API ++++ @@ -38,7 +39,7 @@ The following example associates the `planned-outages` calendar with the PUT _xpack/ml/calendars/planned-outages/jobs/total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_openjob] +// 
TEST[skip:setup:calendar_outages_openjob] The API returns the following results: @@ -51,4 +52,4 @@ The API returns the following results: ] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/put-calendar.asciidoc b/docs/reference/ml/apis/put-calendar.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/put-calendar.asciidoc rename to docs/reference/ml/apis/put-calendar.asciidoc index a82da5a2c0c0a..06b8e55d7747c 100644 --- a/x-pack/docs/en/rest-api/ml/put-calendar.asciidoc +++ b/docs/reference/ml/apis/put-calendar.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-calendar]] === Create Calendar API ++++ @@ -44,6 +45,7 @@ The following example creates the `planned-outages` calendar: PUT _xpack/ml/calendars/planned-outages -------------------------------------------------- // CONSOLE +// TEST[skip:need-license] When the calendar is created, you receive the following results: [source,js] @@ -53,4 +55,4 @@ When the calendar is created, you receive the following results: "job_ids": [] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/put-datafeed.asciidoc rename to docs/reference/ml/apis/put-datafeed.asciidoc index 6b8ad932a1d42..b5c99fc8e36af 100644 --- a/x-pack/docs/en/rest-api/ml/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-datafeed]] === Create {dfeeds-cap} API ++++ @@ -107,7 +108,7 @@ PUT _xpack/ml/datafeeds/datafeed-total-requests } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] When the {dfeed} is created, you receive the following results: [source,js] @@ -132,4 +133,4 @@ When the {dfeed} is created, you receive the following results: } ---- // TESTRESPONSE[s/"query_delay": "83474ms"/"query_delay": $body.query_delay/] -// TESTRESPONSE[s/"query.boost": "1.0"/"query.boost": $body.query.boost/] +// TESTRESPONSE[s/"query.boost": "1.0"/"query.boost": $body.query.boost/] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/put-filter.asciidoc b/docs/reference/ml/apis/put-filter.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/put-filter.asciidoc rename to docs/reference/ml/apis/put-filter.asciidoc index d2982a56f612e..165fe9697584c 100644 --- a/x-pack/docs/en/rest-api/ml/put-filter.asciidoc +++ b/docs/reference/ml/apis/put-filter.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-filter]] === Create Filter API ++++ @@ -55,6 +56,7 @@ PUT _xpack/ml/filters/safe_domains } -------------------------------------------------- // CONSOLE +// TEST[skip:need-licence] When the filter is created, you receive the following response: [source,js] @@ -65,4 +67,4 @@ When the filter is created, you receive the following response: "items": ["*.google.com", "wikipedia.org"] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/put-job.asciidoc b/docs/reference/ml/apis/put-job.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/put-job.asciidoc rename to docs/reference/ml/apis/put-job.asciidoc index 1c436f53d32e7..ce05348490601 100644 --- a/x-pack/docs/en/rest-api/ml/put-job.asciidoc +++ b/docs/reference/ml/apis/put-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-job]] === Create Jobs API ++++ 
@@ -104,6 +105,7 @@ PUT _xpack/ml/anomaly_detectors/total-requests } -------------------------------------------------- // CONSOLE +// TEST[skip:need-licence] When the job is created, you receive the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/resultsresource.asciidoc b/docs/reference/ml/apis/resultsresource.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/resultsresource.asciidoc rename to docs/reference/ml/apis/resultsresource.asciidoc index c28ed72aedb36..d3abd094be79c 100644 --- a/x-pack/docs/en/rest-api/ml/resultsresource.asciidoc +++ b/docs/reference/ml/apis/resultsresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-results-resource]] === Results Resources diff --git a/x-pack/docs/en/rest-api/ml/revert-snapshot.asciidoc b/docs/reference/ml/apis/revert-snapshot.asciidoc similarity index 67% rename from x-pack/docs/en/rest-api/ml/revert-snapshot.asciidoc rename to docs/reference/ml/apis/revert-snapshot.asciidoc index 1dc3046ac4f22..48fc65edf90fa 100644 --- a/x-pack/docs/en/rest-api/ml/revert-snapshot.asciidoc +++ b/docs/reference/ml/apis/revert-snapshot.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-revert-snapshot]] === Revert Model Snapshots API ++++ @@ -22,33 +23,6 @@ then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. -//// -To revert to a saved snapshot, you must follow this sequence: -. Close the job -. Revert to a snapshot -. Open the job -. Send new data to the job - -When reverting to a snapshot, there is a choice to make about whether or not -you want to keep the results that were created between the time of the snapshot -and the current time. In the case of Black Friday for instance, you might want -to keep the results and carry on processing data from the current time, -though without the models learning the one-off behavior and compensating for it. -However, say in the event of a critical system failure and you decide to reset -and models to a previous known good state and process data from that time, -it makes sense to delete the intervening results for the known bad period and -resend data from that earlier time. - -Any gaps in data since the snapshot time will be treated as nulls and not modeled. -If there is a partial bucket at the end of the snapshot and/or at the beginning -of the new input data, then this will be ignored and treated as a gap. - -For jobs with many entities, the model state may be very large. -If a model state is several GB, this could take 10-20 mins to revert depending -upon machine spec and resources. If this is the case, please ensure this time -is planned for. -Model size (in bytes) is available as part of the Job Resource Model Size Stats. -//// IMPORTANT: Before you revert to a saved snapshot, you must close the job. @@ -77,7 +51,6 @@ If you want to resend data, then delete the intervening results. You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. 
==== Examples diff --git a/x-pack/docs/en/rest-api/ml/snapshotresource.asciidoc b/docs/reference/ml/apis/snapshotresource.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/snapshotresource.asciidoc rename to docs/reference/ml/apis/snapshotresource.asciidoc index fb2e3d83de6d1..f068f6d94ed0f 100644 --- a/x-pack/docs/en/rest-api/ml/snapshotresource.asciidoc +++ b/docs/reference/ml/apis/snapshotresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-snapshot-resource]] === Model Snapshot Resources diff --git a/x-pack/docs/en/rest-api/ml/start-datafeed.asciidoc b/docs/reference/ml/apis/start-datafeed.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/start-datafeed.asciidoc rename to docs/reference/ml/apis/start-datafeed.asciidoc index fa3ea35a751f7..566e700dd043b 100644 --- a/x-pack/docs/en/rest-api/ml/start-datafeed.asciidoc +++ b/docs/reference/ml/apis/start-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-start-datafeed]] === Start {dfeeds-cap} API ++++ @@ -79,7 +80,6 @@ of the latest processed record. You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. ==== Security Integration @@ -101,7 +101,7 @@ POST _xpack/ml/datafeeds/datafeed-total-requests/_start } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] When the {dfeed} starts, you receive the following results: [source,js] @@ -110,5 +110,4 @@ When the {dfeed} starts, you receive the following results: "started": true } ---- -// CONSOLE -// TESTRESPONSE +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/stop-datafeed.asciidoc b/docs/reference/ml/apis/stop-datafeed.asciidoc similarity index 92% rename from x-pack/docs/en/rest-api/ml/stop-datafeed.asciidoc rename to docs/reference/ml/apis/stop-datafeed.asciidoc index 27872ff5a2080..7ea48974f2df1 100644 --- a/x-pack/docs/en/rest-api/ml/stop-datafeed.asciidoc +++ b/docs/reference/ml/apis/stop-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-stop-datafeed]] === Stop {dfeeds-cap} API ++++ @@ -18,7 +19,6 @@ A {dfeed} can be started and stopped multiple times throughout its lifecycle. `POST _xpack/ml/datafeeds/_all/_stop` -//TBD: Can there be spaces between the items in the list? 
===== Description @@ -63,14 +63,14 @@ POST _xpack/ml/datafeeds/datafeed-total-requests/_stop } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_startdf] +// TEST[skip:setup:server_metrics_startdf] When the {dfeed} stops, you receive the following results: + [source,js] ---- { "stopped": true } ---- -// CONSOLE -// TESTRESPONSE +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/update-datafeed.asciidoc b/docs/reference/ml/apis/update-datafeed.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/update-datafeed.asciidoc rename to docs/reference/ml/apis/update-datafeed.asciidoc index bc9462347c1c0..be55d864c871e 100644 --- a/x-pack/docs/en/rest-api/ml/update-datafeed.asciidoc +++ b/docs/reference/ml/apis/update-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-update-datafeed]] === Update {dfeeds-cap} API ++++ @@ -106,7 +107,7 @@ POST _xpack/ml/datafeeds/datafeed-total-requests/_update } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_datafeed] +// TEST[skip:setup:server_metrics_datafeed] When the {dfeed} is updated, you receive the full {dfeed} configuration with with the updated values: diff --git a/x-pack/docs/en/rest-api/ml/update-filter.asciidoc b/docs/reference/ml/apis/update-filter.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/update-filter.asciidoc rename to docs/reference/ml/apis/update-filter.asciidoc index 1b6760dfed654..f551c8e923b89 100644 --- a/x-pack/docs/en/rest-api/ml/update-filter.asciidoc +++ b/docs/reference/ml/apis/update-filter.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-update-filter]] === Update Filter API ++++ @@ -52,7 +53,7 @@ POST _xpack/ml/filters/safe_domains/_update } -------------------------------------------------- // CONSOLE -// TEST[setup:ml_filter_safe_domains] +// TEST[skip:setup:ml_filter_safe_domains] The API returns the following results: @@ -64,4 +65,4 @@ The API returns the following results: "items": ["*.google.com", "*.myorg.com"] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/update-job.asciidoc b/docs/reference/ml/apis/update-job.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/update-job.asciidoc rename to docs/reference/ml/apis/update-job.asciidoc index 852745e9dd907..58bfb2679d935 100644 --- a/x-pack/docs/en/rest-api/ml/update-job.asciidoc +++ b/docs/reference/ml/apis/update-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-update-job]] === Update Jobs API ++++ @@ -121,7 +122,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_update } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] When the job is updated, you receive a summary of the job configuration information, including the updated property values. For example: @@ -177,4 +178,4 @@ information, including the updated property values. 
For example: } ---- // TESTRESPONSE[s/"job_version": "7.0.0-alpha1"/"job_version": $body.job_version/] -// TESTRESPONSE[s/"create_time": 1518808660505/"create_time": $body.create_time/] +// TESTRESPONSE[s/"create_time": 1518808660505/"create_time": $body.create_time/] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/update-snapshot.asciidoc b/docs/reference/ml/apis/update-snapshot.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/update-snapshot.asciidoc rename to docs/reference/ml/apis/update-snapshot.asciidoc index 8c98a7b732186..b58eebe810fa1 100644 --- a/x-pack/docs/en/rest-api/ml/update-snapshot.asciidoc +++ b/docs/reference/ml/apis/update-snapshot.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-update-snapshot]] === Update Model Snapshots API ++++ diff --git a/x-pack/docs/en/rest-api/ml/validate-detector.asciidoc b/docs/reference/ml/apis/validate-detector.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/validate-detector.asciidoc rename to docs/reference/ml/apis/validate-detector.asciidoc index ab8a0de442cf8..e525b1a1b2008 100644 --- a/x-pack/docs/en/rest-api/ml/validate-detector.asciidoc +++ b/docs/reference/ml/apis/validate-detector.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-valid-detector]] === Validate Detectors API ++++ @@ -44,6 +45,7 @@ POST _xpack/ml/anomaly_detectors/_validate/detector } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] When the validation completes, you receive the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/validate-job.asciidoc b/docs/reference/ml/apis/validate-job.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/validate-job.asciidoc rename to docs/reference/ml/apis/validate-job.asciidoc index 0ccc5bc04e1d1..b83260582602e 100644 --- a/x-pack/docs/en/rest-api/ml/validate-job.asciidoc +++ b/docs/reference/ml/apis/validate-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-valid-job]] === Validate Jobs API ++++ @@ -55,6 +56,7 @@ POST _xpack/ml/anomaly_detectors/_validate } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] When the validation is complete, you receive the following results: [source,js] diff --git a/x-pack/docs/en/ml/categories.asciidoc b/docs/reference/ml/categories.asciidoc similarity index 99% rename from x-pack/docs/en/ml/categories.asciidoc rename to docs/reference/ml/categories.asciidoc index 21f71b871cbb9..03ebc8af76ee6 100644 --- a/x-pack/docs/en/ml/categories.asciidoc +++ b/docs/reference/ml/categories.asciidoc @@ -44,6 +44,7 @@ PUT _xpack/ml/anomaly_detectors/it_ops_new_logs } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> The `categorization_field_name` property indicates which field will be categorized. <2> The resulting categories are used in a detector by setting `by_field_name`, @@ -127,6 +128,7 @@ PUT _xpack/ml/anomaly_detectors/it_ops_new_logs2 } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> The {ref}/analysis-pattern-replace-charfilter.html[`pattern_replace` character filter] here achieves exactly the same as the `categorization_filters` in the first @@ -193,6 +195,7 @@ PUT _xpack/ml/anomaly_detectors/it_ops_new_logs3 } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> Tokens basically consist of hyphens, digits, letters, underscores and dots. 
<2> By default, categorization ignores tokens that begin with a digit. <3> By default, categorization also ignores tokens that are hexadecimal numbers. diff --git a/x-pack/docs/en/ml/configuring.asciidoc b/docs/reference/ml/configuring.asciidoc similarity index 88% rename from x-pack/docs/en/ml/configuring.asciidoc rename to docs/reference/ml/configuring.asciidoc index e35f046a82bd9..9b6149d662aa8 100644 --- a/x-pack/docs/en/ml/configuring.asciidoc +++ b/docs/reference/ml/configuring.asciidoc @@ -36,20 +36,20 @@ The scenarios in this section describe some best practices for generating useful * <> * <> -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/customurl.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/customurl.asciidoc include::customurl.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/aggregations.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/aggregations.asciidoc include::aggregations.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/categories.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/categories.asciidoc include::categories.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/populations.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/populations.asciidoc include::populations.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/transforms.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/transforms.asciidoc include::transforms.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/detector-custom-rules.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/detector-custom-rules.asciidoc include::detector-custom-rules.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/ml/customurl.asciidoc b/docs/reference/ml/customurl.asciidoc similarity index 99% rename from x-pack/docs/en/ml/customurl.asciidoc rename to docs/reference/ml/customurl.asciidoc index 7c197084c0e5f..95f4f5f938f0a 100644 --- a/x-pack/docs/en/ml/customurl.asciidoc +++ b/docs/reference/ml/customurl.asciidoc @@ -106,7 +106,7 @@ POST _xpack/ml/anomaly_detectors/sample_job/_update } ---------------------------------- //CONSOLE -//TEST[setup:sample_job] +//TEST[skip:setup:sample_job] When you click this custom URL in the anomalies table in {kib}, it opens up the *Discover* page and displays source data for the period one hour before and diff --git a/x-pack/docs/en/ml/detector-custom-rules.asciidoc b/docs/reference/ml/detector-custom-rules.asciidoc similarity index 97% rename from x-pack/docs/en/ml/detector-custom-rules.asciidoc rename to docs/reference/ml/detector-custom-rules.asciidoc index 8513c7e4d2566..02881f4cc4313 100644 --- a/x-pack/docs/en/ml/detector-custom-rules.asciidoc +++ b/docs/reference/ml/detector-custom-rules.asciidoc @@ -39,6 +39,7 @@ PUT _xpack/ml/filters/safe_domains } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] Now, we can create our job specifying a scope that uses the `safe_domains` filter for the `highest_registered_domain` field: @@ -70,6 +71,7 @@ PUT _xpack/ml/anomaly_detectors/dns_exfiltration_with_rule } 
---------------------------------- // CONSOLE +// TEST[skip:needs-licence] As time advances and we see more data and more results, we might encounter new domains that we want to add in the filter. We can do that by using the @@ -83,7 +85,7 @@ POST _xpack/ml/filters/safe_domains/_update } ---------------------------------- // CONSOLE -// TEST[setup:ml_filter_safe_domains] +// TEST[skip:setup:ml_filter_safe_domains] Note that we can use any of the `partition_field_name`, `over_field_name`, or `by_field_name` fields in the `scope`. @@ -123,6 +125,7 @@ PUT _xpack/ml/anomaly_detectors/scoping_multiple_fields } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] Such a detector will skip results when the values of all 3 scoped fields are included in the referenced filters. @@ -166,6 +169,7 @@ PUT _xpack/ml/anomaly_detectors/cpu_with_rule } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] When there are multiple conditions they are combined with a logical `and`. This is useful when we want the rule to apply to a range. We simply create @@ -205,6 +209,7 @@ PUT _xpack/ml/anomaly_detectors/rule_with_range } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] ==== Custom rules in the life-cycle of a job diff --git a/x-pack/docs/en/ml/functions.asciidoc b/docs/reference/ml/functions.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions.asciidoc rename to docs/reference/ml/functions.asciidoc diff --git a/x-pack/docs/en/ml/functions/count.asciidoc b/docs/reference/ml/functions/count.asciidoc similarity index 97% rename from x-pack/docs/en/ml/functions/count.asciidoc rename to docs/reference/ml/functions/count.asciidoc index a2dc5645b61ae..abbbd118ffebf 100644 --- a/x-pack/docs/en/ml/functions/count.asciidoc +++ b/docs/reference/ml/functions/count.asciidoc @@ -59,6 +59,7 @@ PUT _xpack/ml/anomaly_detectors/example1 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] This example is probably the simplest possible analysis. It identifies time buckets during which the overall count of events is higher or lower than @@ -86,6 +87,7 @@ PUT _xpack/ml/anomaly_detectors/example2 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you use this `high_count` function in a detector in your job, it models the event rate for each error code. It detects users that generate an @@ -110,6 +112,7 @@ PUT _xpack/ml/anomaly_detectors/example3 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] In this example, the function detects when the count of events for a status code is lower than usual. @@ -136,6 +139,7 @@ PUT _xpack/ml/anomaly_detectors/example4 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you are analyzing an aggregated `events_per_min` field, do not use a sum function (for example, `sum(events_per_min)`). Instead, use the count function @@ -200,6 +204,7 @@ PUT _xpack/ml/anomaly_detectors/example5 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you use this `high_non_zero_count` function in a detector in your job, it models the count of events for the `signaturename` field. 
It ignores any buckets @@ -253,6 +258,7 @@ PUT _xpack/ml/anomaly_detectors/example6 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] This `distinct_count` function detects when a system has an unusual number of logged in users. When you use this function in a detector in your job, it @@ -278,6 +284,7 @@ PUT _xpack/ml/anomaly_detectors/example7 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] This example detects instances of port scanning. When you use this function in a detector in your job, it models the distinct count of ports. It also detects the diff --git a/x-pack/docs/en/ml/functions/geo.asciidoc b/docs/reference/ml/functions/geo.asciidoc similarity index 98% rename from x-pack/docs/en/ml/functions/geo.asciidoc rename to docs/reference/ml/functions/geo.asciidoc index 5bcf6c3394558..461ab825ff5b2 100644 --- a/x-pack/docs/en/ml/functions/geo.asciidoc +++ b/docs/reference/ml/functions/geo.asciidoc @@ -47,6 +47,7 @@ PUT _xpack/ml/anomaly_detectors/example1 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you use this `lat_long` function in a detector in your job, it detects anomalies where the geographic location of a credit card transaction is @@ -98,6 +99,6 @@ PUT _xpack/ml/datafeeds/datafeed-test2 } -------------------------------------------------- // CONSOLE -// TEST[setup:farequote_job] +// TEST[skip:setup:farequote_job] For more information, see <>. diff --git a/x-pack/docs/en/ml/functions/info.asciidoc b/docs/reference/ml/functions/info.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/info.asciidoc rename to docs/reference/ml/functions/info.asciidoc diff --git a/x-pack/docs/en/ml/functions/metric.asciidoc b/docs/reference/ml/functions/metric.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/metric.asciidoc rename to docs/reference/ml/functions/metric.asciidoc diff --git a/x-pack/docs/en/ml/functions/rare.asciidoc b/docs/reference/ml/functions/rare.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/rare.asciidoc rename to docs/reference/ml/functions/rare.asciidoc diff --git a/x-pack/docs/en/ml/functions/sum.asciidoc b/docs/reference/ml/functions/sum.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/sum.asciidoc rename to docs/reference/ml/functions/sum.asciidoc diff --git a/x-pack/docs/en/ml/functions/time.asciidoc b/docs/reference/ml/functions/time.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/time.asciidoc rename to docs/reference/ml/functions/time.asciidoc diff --git a/x-pack/docs/en/ml/images/ml-category-advanced.jpg b/docs/reference/ml/images/ml-category-advanced.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-category-advanced.jpg rename to docs/reference/ml/images/ml-category-advanced.jpg diff --git a/x-pack/docs/en/ml/images/ml-category-anomalies.jpg b/docs/reference/ml/images/ml-category-anomalies.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-category-anomalies.jpg rename to docs/reference/ml/images/ml-category-anomalies.jpg diff --git a/x-pack/docs/en/ml/images/ml-categoryterms.jpg b/docs/reference/ml/images/ml-categoryterms.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-categoryterms.jpg rename to docs/reference/ml/images/ml-categoryterms.jpg diff --git a/x-pack/docs/en/ml/images/ml-create-job.jpg b/docs/reference/ml/images/ml-create-job.jpg similarity index 100% rename 
from x-pack/docs/en/ml/images/ml-create-job.jpg rename to docs/reference/ml/images/ml-create-job.jpg diff --git a/x-pack/docs/en/ml/images/ml-create-jobs.jpg b/docs/reference/ml/images/ml-create-jobs.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-create-jobs.jpg rename to docs/reference/ml/images/ml-create-jobs.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl-detail.jpg b/docs/reference/ml/images/ml-customurl-detail.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl-detail.jpg rename to docs/reference/ml/images/ml-customurl-detail.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl-discover.jpg b/docs/reference/ml/images/ml-customurl-discover.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl-discover.jpg rename to docs/reference/ml/images/ml-customurl-discover.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl-edit.jpg b/docs/reference/ml/images/ml-customurl-edit.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl-edit.jpg rename to docs/reference/ml/images/ml-customurl-edit.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl.jpg b/docs/reference/ml/images/ml-customurl.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl.jpg rename to docs/reference/ml/images/ml-customurl.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-dates.jpg b/docs/reference/ml/images/ml-data-dates.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-dates.jpg rename to docs/reference/ml/images/ml-data-dates.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-keywords.jpg b/docs/reference/ml/images/ml-data-keywords.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-keywords.jpg rename to docs/reference/ml/images/ml-data-keywords.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-metrics.jpg b/docs/reference/ml/images/ml-data-metrics.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-metrics.jpg rename to docs/reference/ml/images/ml-data-metrics.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-topmetrics.jpg b/docs/reference/ml/images/ml-data-topmetrics.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-topmetrics.jpg rename to docs/reference/ml/images/ml-data-topmetrics.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-visualizer.jpg b/docs/reference/ml/images/ml-data-visualizer.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-visualizer.jpg rename to docs/reference/ml/images/ml-data-visualizer.jpg diff --git a/x-pack/docs/en/ml/images/ml-edit-job.jpg b/docs/reference/ml/images/ml-edit-job.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-edit-job.jpg rename to docs/reference/ml/images/ml-edit-job.jpg diff --git a/x-pack/docs/en/ml/images/ml-population-anomaly.jpg b/docs/reference/ml/images/ml-population-anomaly.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-population-anomaly.jpg rename to docs/reference/ml/images/ml-population-anomaly.jpg diff --git a/x-pack/docs/en/ml/images/ml-population-job.jpg b/docs/reference/ml/images/ml-population-job.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-population-job.jpg rename to docs/reference/ml/images/ml-population-job.jpg diff --git a/x-pack/docs/en/ml/images/ml-population-results.jpg b/docs/reference/ml/images/ml-population-results.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-population-results.jpg rename to docs/reference/ml/images/ml-population-results.jpg diff --git 
a/x-pack/docs/en/ml/images/ml-scriptfields.jpg b/docs/reference/ml/images/ml-scriptfields.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-scriptfields.jpg rename to docs/reference/ml/images/ml-scriptfields.jpg diff --git a/x-pack/docs/en/ml/images/ml-start-feed.jpg b/docs/reference/ml/images/ml-start-feed.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-start-feed.jpg rename to docs/reference/ml/images/ml-start-feed.jpg diff --git a/x-pack/docs/en/ml/images/ml-stop-feed.jpg b/docs/reference/ml/images/ml-stop-feed.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-stop-feed.jpg rename to docs/reference/ml/images/ml-stop-feed.jpg diff --git a/x-pack/docs/en/ml/images/ml.jpg b/docs/reference/ml/images/ml.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml.jpg rename to docs/reference/ml/images/ml.jpg diff --git a/x-pack/docs/en/ml/populations.asciidoc b/docs/reference/ml/populations.asciidoc similarity index 94% rename from x-pack/docs/en/ml/populations.asciidoc rename to docs/reference/ml/populations.asciidoc index bf0dd2ad7d7bb..ed58c117f17d7 100644 --- a/x-pack/docs/en/ml/populations.asciidoc +++ b/docs/reference/ml/populations.asciidoc @@ -51,14 +51,11 @@ PUT _xpack/ml/anomaly_detectors/population } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> This `over_field_name` property indicates that the metrics for each user ( as identified by their `username` value) are analyzed relative to other users in each bucket. -//TO-DO: Per sophiec20 "Perhaps add the datafeed config and add a query filter to -//include only workstations as servers and printers would behave differently -//from the population - If your data is stored in {es}, you can use the population job wizard in {kib} to create a job with these same properties. For example, the population job wizard provides the following job settings: diff --git a/x-pack/docs/en/ml/stopping-ml.asciidoc b/docs/reference/ml/stopping-ml.asciidoc similarity index 94% rename from x-pack/docs/en/ml/stopping-ml.asciidoc rename to docs/reference/ml/stopping-ml.asciidoc index c0be2d947cdc7..17505a02d1521 100644 --- a/x-pack/docs/en/ml/stopping-ml.asciidoc +++ b/docs/reference/ml/stopping-ml.asciidoc @@ -28,7 +28,7 @@ request stops the `feed1` {dfeed}: POST _xpack/ml/datafeeds/datafeed-total-requests/_stop -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_startdf] +// TEST[skip:setup:server_metrics_startdf] NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}. For more information, see <>. @@ -49,6 +49,7 @@ If you are upgrading your cluster, you can use the following request to stop all POST _xpack/ml/datafeeds/_all/_stop ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] [float] [[closing-ml-jobs]] @@ -67,7 +68,7 @@ example, the following request closes the `job1` job: POST _xpack/ml/anomaly_detectors/total-requests/_close -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}. For more information, see <>. 
@@ -86,3 +87,4 @@ all open jobs on the cluster: POST _xpack/ml/anomaly_detectors/_all/_close ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] diff --git a/x-pack/docs/en/ml/transforms.asciidoc b/docs/reference/ml/transforms.asciidoc similarity index 97% rename from x-pack/docs/en/ml/transforms.asciidoc rename to docs/reference/ml/transforms.asciidoc index c4b4d56029748..a2276895fc9e8 100644 --- a/x-pack/docs/en/ml/transforms.asciidoc +++ b/docs/reference/ml/transforms.asciidoc @@ -95,7 +95,7 @@ PUT /my_index/my_type/1 } ---------------------------------- // CONSOLE -// TESTSETUP +// TEST[skip:SETUP] <1> In this example, string fields are mapped as `keyword` fields to support aggregation. If you want both a full text (`text`) and a keyword (`keyword`) version of the same field, use multi-fields. For more information, see @@ -144,7 +144,7 @@ PUT _xpack/ml/datafeeds/datafeed-test1 } ---------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] <1> A script field named `total_error_count` is referenced in the detector within the job. <2> The script field is defined in the {dfeed}. @@ -163,7 +163,7 @@ You can preview the contents of the {dfeed} by using the following API: GET _xpack/ml/datafeeds/datafeed-test1/_preview ---------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] In this example, the API returns the following results, which contain a sum of the `error_count` and `aborted_count` values: @@ -177,8 +177,6 @@ the `error_count` and `aborted_count` values: } ] ---------------------------------- -// TESTRESPONSE - NOTE: This example demonstrates how to use script fields, but it contains insufficient data to generate meaningful results. For a full demonstration of @@ -254,7 +252,7 @@ PUT _xpack/ml/datafeeds/datafeed-test2 GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] <1> The script field has a rather generic name in this case, since it will be used for various tests in the subsequent examples. <2> The script field uses the plus (+) operator to concatenate strings. @@ -271,7 +269,6 @@ and "SMITH " have been concatenated and an underscore was added: } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform3]] .Example 3: Trimming strings @@ -292,7 +289,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field uses the `trim()` function to trim extra white space from a string. @@ -308,7 +305,6 @@ has been trimmed to "SMITH": } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform4]] .Example 4: Converting strings to lowercase @@ -329,7 +325,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field uses the `toLowerCase` function to convert a string to all lowercase letters. Likewise, you can use the `toUpperCase{}` function to convert a string to uppercase letters. 
@@ -346,7 +342,6 @@ has been converted to "joe": } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform5]] .Example 5: Converting strings to mixed case formats @@ -367,7 +362,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field is a more complicated example of case manipulation. It uses the `subString()` function to capitalize the first letter of a string and converts the remaining characters to lowercase. @@ -384,7 +379,6 @@ has been converted to "Joe": } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform6]] .Example 6: Replacing tokens @@ -405,7 +399,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field uses regular expressions to replace white space with underscores. @@ -421,7 +415,6 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform7]] .Example 7: Regular expression matching and concatenation @@ -442,7 +435,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field looks for a specific regular expression pattern and emits the matched groups as a concatenated string. If no match is found, it emits an empty string. @@ -459,7 +452,6 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform8]] .Example 8: Splitting strings by domain name @@ -509,7 +501,7 @@ PUT _xpack/ml/datafeeds/datafeed-test3 GET _xpack/ml/datafeeds/datafeed-test3/_preview -------------------------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] If you have a single field that contains a well-formed DNS domain name, you can use the `domainSplit()` function to split the string into its highest registered @@ -537,7 +529,6 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform9]] .Example 9: Transforming geo_point data @@ -583,7 +574,7 @@ PUT _xpack/ml/datafeeds/datafeed-test4 GET _xpack/ml/datafeeds/datafeed-test4/_preview -------------------------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] In {es}, location data can be stored in `geo_point` fields but this data type is not supported natively in {xpackml} analytics. This example of a script field @@ -602,4 +593,4 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE + diff --git a/docs/reference/modules/discovery/zen.asciidoc b/docs/reference/modules/discovery/zen.asciidoc index f0f26a4665966..e9be7aa52e890 100644 --- a/docs/reference/modules/discovery/zen.asciidoc +++ b/docs/reference/modules/discovery/zen.asciidoc @@ -1,13 +1,12 @@ [[modules-discovery-zen]] === Zen Discovery -The zen discovery is the built in discovery module for Elasticsearch and -the default. 
It provides unicast discovery, but can be extended to -support cloud environments and other forms of discovery. +Zen discovery is the built-in, default, discovery module for Elasticsearch. It +provides unicast and file-based discovery, and can be extended to support cloud +environments and other forms of discovery via plugins. -The zen discovery is integrated with other modules, for example, all -communication between nodes is done using the -<> module. +Zen discovery is integrated with other modules, for example, all communication +between nodes is done using the <> module. It is separated into several sub modules, which are explained below: @@ -15,86 +14,155 @@ It is separated into several sub modules, which are explained below: [[ping]] ==== Ping -This is the process where a node uses the discovery mechanisms to find -other nodes. +This is the process where a node uses the discovery mechanisms to find other +nodes. + +[float] +[[discovery-seed-nodes]] +==== Seed nodes + +Zen discovery uses a list of _seed_ nodes in order to start off the discovery +process. At startup, or when electing a new master, Elasticsearch tries to +connect to each seed node in its list, and holds a gossip-like conversation with +them to find other nodes and to build a complete picture of the cluster. By +default there are two methods for configuring the list of seed nodes: _unicast_ +and _file-based_. It is recommended that the list of seed nodes comprises the +list of master-eligible nodes in the cluster. [float] [[unicast]] ===== Unicast -Unicast discovery requires a list of hosts to use that will act as gossip -routers. These hosts can be specified as hostnames or IP addresses; hosts -specified as hostnames are resolved to IP addresses during each round of -pinging. Note that if you are in an environment where DNS resolutions vary with -time, you might need to adjust your <>. +Unicast discovery configures a static list of hosts for use as seed nodes. +These hosts can be specified as hostnames or IP addresses; hosts specified as +hostnames are resolved to IP addresses during each round of pinging. Note that +if you are in an environment where DNS resolutions vary with time, you might +need to adjust your <>. -It is recommended that the unicast hosts list be maintained as the list of -master-eligible nodes in the cluster. +The list of hosts is set using the `discovery.zen.ping.unicast.hosts` static +setting. This is either an array of hosts or a comma-delimited string. Each +value should be in the form of `host:port` or `host` (where `port` defaults to +the setting `transport.profiles.default.port` falling back to +`transport.tcp.port` if not set). Note that IPv6 hosts must be bracketed. The +default for this setting is `127.0.0.1, [::1]` -Unicast discovery provides the following settings with the `discovery.zen.ping.unicast` prefix: +Additionally, the `discovery.zen.ping.unicast.resolve_timeout` configures the +amount of time to wait for DNS lookups on each round of pinging. This is +specified as a <> and defaults to 5s. -[cols="<,<",options="header",] -|======================================================================= -|Setting |Description -|`hosts` |Either an array setting or a comma delimited setting. Each - value should be in the form of `host:port` or `host` (where `port` defaults to the setting `transport.profiles.default.port` - falling back to `transport.tcp.port` if not set). Note that IPv6 hosts must be bracketed. 
Defaults to `127.0.0.1, [::1]` -|`hosts.resolve_timeout` |The amount of time to wait for DNS lookups on each round of pinging. Specified as -<>. Defaults to 5s. -|======================================================================= +Unicast discovery uses the <> module to perform the +discovery. -The unicast discovery uses the <> module to perform the discovery. +[float] +[[file-based-hosts-provider]] +===== File-based + +In addition to hosts provided by the static `discovery.zen.ping.unicast.hosts` +setting, it is possible to provide a list of hosts via an external file. +Elasticsearch reloads this file when it changes, so that the list of seed nodes +can change dynamically without needing to restart each node. For example, this +gives a convenient mechanism for an Elasticsearch instance that is run in a +Docker container to be dynamically supplied with a list of IP addresses to +connect to for Zen discovery when those IP addresses may not be known at node +startup. + +To enable file-based discovery, configure the `file` hosts provider as follows: + +[source,txt] +---------------------------------------------------------------- +discovery.zen.hosts_provider: file +---------------------------------------------------------------- + +Then create a file at `$ES_PATH_CONF/unicast_hosts.txt` in the format described +below. Any time a change is made to the `unicast_hosts.txt` file the new +changes will be picked up by Elasticsearch and the new hosts list will be used. + +Note that the file-based discovery plugin augments the unicast hosts list in +`elasticsearch.yml`: if there are valid unicast host entries in +`discovery.zen.ping.unicast.hosts` then they will be used in addition to those +supplied in `unicast_hosts.txt`. + +The `discovery.zen.ping.unicast.resolve_timeout` setting also applies to DNS +lookups for nodes specified by address via file-based discovery. This is +specified as a <> and defaults to 5s. + +The format of the file is to specify one node entry per line. Each node entry +consists of the host (host name or IP address) and an optional transport port +number. If the port number is specified, is must come immediately after the +host (on the same line) separated by a `:`. If the port number is not +specified, a default value of 9300 is used. + +For example, this is an example of `unicast_hosts.txt` for a cluster with four +nodes that participate in unicast discovery, some of which are not running on +the default port: + +[source,txt] +---------------------------------------------------------------- +10.10.10.5 +10.10.10.6:9305 +10.10.10.5:10005 +# an IPv6 address +[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301 +---------------------------------------------------------------- + +Host names are allowed instead of IP addresses (similar to +`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be specified in +brackets with the port coming after the brackets. + +It is also possible to add comments to this file. All comments must appear on +their lines starting with `#` (i.e. comments cannot start in the middle of a +line). [float] [[master-election]] ==== Master Election -As part of the ping process a master of the cluster is either -elected or joined to. This is done automatically. The -`discovery.zen.ping_timeout` (which defaults to `3s`) determines how long the node -will wait before deciding on starting an election or joining an existing cluster. -Three pings will be sent over this timeout interval. 
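Pulling the unicast and file-based settings described above together, a minimal `elasticsearch.yml` sketch might look like the following. It is illustrative only and not part of this patch: the addresses are the placeholder values from the example `unicast_hosts.txt` above, and the `resolve_timeout` value is an arbitrary override of the 5s default.

[source,txt]
----------------------------------------------------------------
# Illustrative snippet only; addresses are placeholders.

# Static seed hosts: host or host:port, IPv6 addresses in brackets.
discovery.zen.ping.unicast.hosts: ["10.10.10.5", "10.10.10.6:9305", "[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301"]

# Time to wait for DNS lookups on each round of pinging (default 5s).
discovery.zen.ping.unicast.resolve_timeout: 10s

# Also read seed hosts from $ES_PATH_CONF/unicast_hosts.txt; the file is
# reloaded when it changes, so the list can grow without a node restart.
discovery.zen.hosts_provider: file
----------------------------------------------------------------

Hosts listed in `unicast_hosts.txt` are used in addition to any valid entries in `discovery.zen.ping.unicast.hosts`, as the new text notes.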
In case where no decision can be -reached after the timeout, the pinging process restarts. -In slow or congested networks, three seconds might not be enough for a node to become -aware of the other nodes in its environment before making an election decision. -Increasing the timeout should be done with care in that case, as it will slow down the -election process. -Once a node decides to join an existing formed cluster, it -will send a join request to the master (`discovery.zen.join_timeout`) -with a timeout defaulting at 20 times the ping timeout. - -When the master node stops or has encountered a problem, the cluster nodes -start pinging again and will elect a new master. This pinging round also -serves as a protection against (partial) network failures where a node may unjustly -think that the master has failed. In this case the node will simply hear from -other nodes about the currently active master. - -If `discovery.zen.master_election.ignore_non_master_pings` is `true`, pings from nodes that are not master -eligible (nodes where `node.master` is `false`) are ignored during master election; the default value is +As part of the ping process a master of the cluster is either elected or joined +to. This is done automatically. The `discovery.zen.ping_timeout` (which defaults +to `3s`) determines how long the node will wait before deciding on starting an +election or joining an existing cluster. Three pings will be sent over this +timeout interval. In case where no decision can be reached after the timeout, +the pinging process restarts. In slow or congested networks, three seconds +might not be enough for a node to become aware of the other nodes in its +environment before making an election decision. Increasing the timeout should +be done with care in that case, as it will slow down the election process. Once +a node decides to join an existing formed cluster, it will send a join request +to the master (`discovery.zen.join_timeout`) with a timeout defaulting at 20 +times the ping timeout. + +When the master node stops or has encountered a problem, the cluster nodes start +pinging again and will elect a new master. This pinging round also serves as a +protection against (partial) network failures where a node may unjustly think +that the master has failed. In this case the node will simply hear from other +nodes about the currently active master. + +If `discovery.zen.master_election.ignore_non_master_pings` is `true`, pings from +nodes that are not master eligible (nodes where `node.master` is `false`) are +ignored during master election; the default value is `false`. + +Nodes can be excluded from becoming a master by setting `node.master` to `false`. -Nodes can be excluded from becoming a master by setting `node.master` to `false`. - -The `discovery.zen.minimum_master_nodes` sets the minimum -number of master eligible nodes that need to join a newly elected master in order for an election to -complete and for the elected node to accept its mastership. The same setting controls the minimum number of -active master eligible nodes that should be a part of any active cluster. If this requirement is not met the -active master node will step down and a new master election will begin. +The `discovery.zen.minimum_master_nodes` sets the minimum number of master +eligible nodes that need to join a newly elected master in order for an election +to complete and for the elected node to accept its mastership. 
The same setting
+controls the minimum number of active master eligible nodes that should be a
+part of any active cluster. If this requirement is not met, the active master
+node will step down and a new master election will begin.
This setting must be set to a <> of your master eligible nodes. It is recommended to avoid having only two master eligible
-nodes, since a quorum of two is two. Therefore, a loss of either master
-eligible node will result in an inoperable cluster.
+nodes, since a quorum of two is two. Therefore, a loss of either master eligible
+node will result in an inoperable cluster.
[float]
[[fault-detection]]
==== Fault Detection
-There are two fault detection processes running. The first is by the
-master, to ping all the other nodes in the cluster and verify that they
-are alive. And on the other end, each node pings to master to verify if
-its still alive or an election process needs to be initiated.
+There are two fault detection processes running. The first is by the master, to
+ping all the other nodes in the cluster and verify that they are alive. On
+the other end, each node pings the master to verify if it is still alive or whether an
+election process needs to be initiated.
The following settings control the fault detection process using the `discovery.zen.fd` prefix:
@@ -116,19 +184,21 @@ considered failed. Defaults to `3`.
The master node is the only node in a cluster that can make changes to the cluster state. The master node processes one cluster state update at a time,
-applies the required changes and publishes the updated cluster state to all
-the other nodes in the cluster. Each node receives the publish message, acknowledges
-it, but does *not* yet apply it. If the master does not receive acknowledgement from
-at least `discovery.zen.minimum_master_nodes` nodes within a certain time (controlled by
-the `discovery.zen.commit_timeout` setting and defaults to 30 seconds) the cluster state
-change is rejected.
-
-Once enough nodes have responded, the cluster state is committed and a message will
-be sent to all the nodes. The nodes then proceed to apply the new cluster state to their
-internal state. The master node waits for all nodes to respond, up to a timeout, before
-going ahead processing the next updates in the queue. The `discovery.zen.publish_timeout` is
-set by default to 30 seconds and is measured from the moment the publishing started. Both
-timeout settings can be changed dynamically through the <>
+applies the required changes and publishes the updated cluster state to all the
+other nodes in the cluster. Each node receives the publish message, acknowledges
+it, but does *not* yet apply it. If the master does not receive acknowledgement
+from at least `discovery.zen.minimum_master_nodes` nodes within a certain time
+(controlled by the `discovery.zen.commit_timeout` setting, which defaults to 30
+seconds), the cluster state change is rejected.
+
+Once enough nodes have responded, the cluster state is committed and a message
+will be sent to all the nodes. The nodes then proceed to apply the new cluster
+state to their internal state. The master node waits for all nodes to respond,
+up to a timeout, before going ahead and processing the next updates in the queue.
+The `discovery.zen.publish_timeout` is set by default to 30 seconds and is
+measured from the moment the publishing started. Both timeout settings can be
+changed dynamically through the <>
[float]
[[no-master-block]]
@@ -143,10 +213,14 @@ rejected when there is no active master.
The `discovery.zen.no_master_block` setting has two valid options: [horizontal] -`all`:: All operations on the node--i.e. both read & writes--will be rejected. This also applies for api cluster state -read or write operations, like the get index settings, put mapping and cluster state api. -`write`:: (default) Write operations will be rejected. Read operations will succeed, based on the last known cluster configuration. -This may result in partial reads of stale data as this node may be isolated from the rest of the cluster. - -The `discovery.zen.no_master_block` setting doesn't apply to nodes-based apis (for example cluster stats, node info and -node stats apis). Requests to these apis will not be blocked and can run on any available node. +`all`:: All operations on the node--i.e. both read & writes--will be rejected. +This also applies for api cluster state read or write operations, like the get +index settings, put mapping and cluster state api. +`write`:: (default) Write operations will be rejected. Read operations will +succeed, based on the last known cluster configuration. This may result in +partial reads of stale data as this node may be isolated from the rest of the +cluster. + +The `discovery.zen.no_master_block` setting doesn't apply to nodes-based apis +(for example cluster stats, node info and node stats apis). Requests to these +apis will not be blocked and can run on any available node. diff --git a/docs/reference/modules/gateway.asciidoc b/docs/reference/modules/gateway.asciidoc index 76e0840793996..038a4b24a853f 100644 --- a/docs/reference/modules/gateway.asciidoc +++ b/docs/reference/modules/gateway.asciidoc @@ -51,7 +51,7 @@ NOTE: These settings only take effect on a full cluster restart. === Dangling indices -When a node joins the cluster, any shards stored in its local data directory +When a node joins the cluster, any shards stored in its local data directory which do not already exist in the cluster will be imported into the cluster. This functionality is intended as a best effort to help users who lose all master nodes. If a new master node is started which is unaware of diff --git a/docs/reference/monitoring/exporters.asciidoc b/docs/reference/monitoring/exporters.asciidoc index 2a7729eee9425..a1d4bc08ae73f 100644 --- a/docs/reference/monitoring/exporters.asciidoc +++ b/docs/reference/monitoring/exporters.asciidoc @@ -105,12 +105,12 @@ route monitoring data: [options="header"] |======================= -| Template | Purpose -| `.monitoring-alerts` | All cluster alerts for monitoring data. -| `.monitoring-beats` | All Beats monitoring data. -| `.monitoring-es` | All {es} monitoring data. -| `.monitoring-kibana` | All {kib} monitoring data. -| `.monitoring-logstash` | All Logstash monitoring data. +| Template | Purpose +| `.monitoring-alerts` | All cluster alerts for monitoring data. +| `.monitoring-beats` | All Beats monitoring data. +| `.monitoring-es` | All {es} monitoring data. +| `.monitoring-kibana` | All {kib} monitoring data. +| `.monitoring-logstash` | All Logstash monitoring data. |======================= The templates are ordinary {es} templates that control the default settings and diff --git a/docs/reference/monitoring/http-export.asciidoc b/docs/reference/monitoring/http-export.asciidoc index 4dfe1a0c537ea..4ba93f326370c 100644 --- a/docs/reference/monitoring/http-export.asciidoc +++ b/docs/reference/monitoring/http-export.asciidoc @@ -96,7 +96,7 @@ see <>. 
[[http-exporter-dns]] ==== Using DNS Hosts in HTTP Exporters -{monitoring} runs inside of the the JVM security manager. When the JVM has the +{monitoring} runs inside of the JVM security manager. When the JVM has the security manager enabled, the JVM changes the duration so that it caches DNS lookups indefinitely (for example, the mapping of a DNS hostname to an IP address). For this reason, if you are in an environment where the DNS response diff --git a/docs/reference/query-dsl/span-multi-term-query.asciidoc b/docs/reference/query-dsl/span-multi-term-query.asciidoc index 40bd15532984e..de78d80284ed2 100644 --- a/docs/reference/query-dsl/span-multi-term-query.asciidoc +++ b/docs/reference/query-dsl/span-multi-term-query.asciidoc @@ -41,5 +41,5 @@ WARNING: `span_multi` queries will hit too many clauses failure if the number of boolean query limit (defaults to 1024).To avoid an unbounded expansion you can set the <> of the multi term query to `top_terms_*` rewrite. Or, if you use `span_multi` on `prefix` query only, you can activate the <> field option of the `text` field instead. This will -rewrite any prefix query on the field to a a single term query that matches the indexed prefix. +rewrite any prefix query on the field to a single term query that matches the indexed prefix. diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index a5a8e4d008ac4..1a932fdd41400 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -503,3 +503,55 @@ guide to the {painless}/index.html[Painless Scripting Language]. See the {painless}/painless-api-reference.html[Painless API Reference] in the guide to the {painless}/index.html[Painless Scripting Language]. + +[role="exclude", id="security-api-roles"] +=== Role management APIs + +You can use the following APIs to add, remove, and retrieve roles in the native realm: + +* <>, <> +* <> +* <> + +[role="exclude",id="security-api-tokens"] +=== Token management APIs + +You can use the following APIs to create and invalidate bearer tokens for access +without requiring basic authentication: + +* <>, <> + +[role="exclude",id="security-api-users"] +=== User Management APIs + +You can use the following APIs to create, read, update, and delete users from the +native realm: + +* <>, <> +* <>, <> +* <> +* <> + +[role="exclude",id="security-api-role-mapping"] +=== Role mapping APIs + +You can use the following APIs to add, remove, and retrieve role mappings: + +* <>, <> +* <> + +[role="exclude",id="security-api-privileges"] +=== Privilege APIs + +See <>. + +[role="exclude",id="xpack-commands"] +=== X-Pack commands + +See <>. + +[role="exclude",id="ml-api-definitions"] +=== Machine learning API definitions + +See <>. + diff --git a/docs/reference/rest-api/defs.asciidoc b/docs/reference/rest-api/defs.asciidoc new file mode 100644 index 0000000000000..4eeedc5539992 --- /dev/null +++ b/docs/reference/rest-api/defs.asciidoc @@ -0,0 +1,27 @@ +[role="xpack"] +[[api-definitions]] +== Definitions + +These resource definitions are used in {ml} and {security} APIs and in {kib} +advanced {ml} job configuration options. 
+ +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +include::{es-repo-dir}/ml/apis/calendarresource.asciidoc[] +include::{es-repo-dir}/ml/apis/datafeedresource.asciidoc[] +include::{es-repo-dir}/ml/apis/filterresource.asciidoc[] +include::{es-repo-dir}/ml/apis/jobresource.asciidoc[] +include::{es-repo-dir}/ml/apis/jobcounts.asciidoc[] +include::{es-repo-dir}/ml/apis/snapshotresource.asciidoc[] +include::{xes-repo-dir}/rest-api/security/role-mapping-resources.asciidoc[] +include::{es-repo-dir}/ml/apis/resultsresource.asciidoc[] +include::{es-repo-dir}/ml/apis/eventresource.asciidoc[] diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 9ec57940dd299..b80e8badf5bb3 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -22,8 +22,8 @@ include::info.asciidoc[] include::{xes-repo-dir}/rest-api/graph/explore.asciidoc[] include::{es-repo-dir}/licensing/index.asciidoc[] include::{es-repo-dir}/migration/migration.asciidoc[] -include::{xes-repo-dir}/rest-api/ml-api.asciidoc[] -include::{xes-repo-dir}/rest-api/rollup-api.asciidoc[] +include::{es-repo-dir}/ml/apis/ml-api.asciidoc[] +include::{es-repo-dir}/rollup/rollup-api.asciidoc[] include::{xes-repo-dir}/rest-api/security.asciidoc[] include::{xes-repo-dir}/rest-api/watcher.asciidoc[] -include::{xes-repo-dir}/rest-api/defs.asciidoc[] +include::defs.asciidoc[] diff --git a/docs/reference/rollup/api-quickref.asciidoc b/docs/reference/rollup/api-quickref.asciidoc new file mode 100644 index 0000000000000..1d372a03ddcfb --- /dev/null +++ b/docs/reference/rollup/api-quickref.asciidoc @@ -0,0 +1,38 @@ +[role="xpack"] +[testenv="basic"] +[[rollup-api-quickref]] +== API Quick Reference + +experimental[] + +Most {rollup} endpoints have the following base: + +[source,js] +---- +/_xpack/rollup/ +---- +// NOTCONSOLE + +[float] +[[rollup-api-jobs]] +=== /job/ + +* {ref}/rollup-put-job.html[PUT /_xpack/rollup/job/+++]: Create a job +* {ref}/rollup-get-job.html[GET /_xpack/rollup/job]: List jobs +* {ref}/rollup-get-job.html[GET /_xpack/rollup/job/+++]: Get job details +* {ref}/rollup-start-job.html[POST /_xpack/rollup/job//_start]: Start a job +* {ref}/rollup-stop-job.html[POST /_xpack/rollup/job//_stop]: Stop a job +* {ref}/rollup-delete-job.html[DELETE /_xpack/rollup/job/+++]: Delete a job + +[float] +[[rollup-api-data]] +=== /data/ + +* {ref}/rollup-get-rollup-caps.html[GET /_xpack/rollup/data//_rollup_caps+++]: Get Rollup Capabilities +* {ref}/rollup-get-rollup-index-caps.html[GET //_rollup/data/+++]: Get Rollup Index Capabilities + +[float] +[[rollup-api-index]] +=== // + +* {ref}/rollup-search.html[GET //_rollup_search]: Search rollup data diff --git a/x-pack/docs/en/rest-api/rollup/delete-job.asciidoc b/docs/reference/rollup/apis/delete-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/delete-job.asciidoc rename to docs/reference/rollup/apis/delete-job.asciidoc index b795e0b28c760..37774560848c5 100644 --- a/x-pack/docs/en/rest-api/rollup/delete-job.asciidoc +++ b/docs/reference/rollup/apis/delete-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-delete-job]] === Delete Job API ++++ diff --git a/x-pack/docs/en/rest-api/rollup/get-job.asciidoc b/docs/reference/rollup/apis/get-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/get-job.asciidoc rename to docs/reference/rollup/apis/get-job.asciidoc index 96053dbfea64f..794d72480121b 100644 --- 
a/x-pack/docs/en/rest-api/rollup/get-job.asciidoc +++ b/docs/reference/rollup/apis/get-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-get-job]] === Get Rollup Jobs API ++++ diff --git a/x-pack/docs/en/rest-api/rollup/put-job.asciidoc b/docs/reference/rollup/apis/put-job.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/rollup/put-job.asciidoc rename to docs/reference/rollup/apis/put-job.asciidoc index 1449acadc636d..79e30ae8dc99b 100644 --- a/x-pack/docs/en/rest-api/rollup/put-job.asciidoc +++ b/docs/reference/rollup/apis/put-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-put-job]] === Create Job API ++++ @@ -43,6 +44,8 @@ started with the <>. `metrics`:: (object) Defines the metrics that should be collected for each grouping tuple. See <>. +For more details about the job configuration, see <>. + ==== Authorization You must have `manage` or `manage_rollup` cluster privileges to use this API. diff --git a/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc b/docs/reference/rollup/apis/rollup-caps.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc rename to docs/reference/rollup/apis/rollup-caps.asciidoc index f770adf1f0d1c..907efb94c1776 100644 --- a/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-caps.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-get-rollup-caps]] === Get Rollup Job Capabilities ++++ @@ -27,8 +28,8 @@ live? ==== Path Parameters `index`:: - (string) Index, indices or index-pattern to return rollup capabilities for. If omitted (or `_all` is used) all available - rollup job capabilities will be returned + (string) Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch + rollup capabilities from all jobs ==== Request Body diff --git a/x-pack/docs/en/rest-api/rollup/rollup-index-caps.asciidoc b/docs/reference/rollup/apis/rollup-index-caps.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/rollup/rollup-index-caps.asciidoc rename to docs/reference/rollup/apis/rollup-index-caps.asciidoc index 4636d9775e9d3..e5ca70cd59cda 100644 --- a/x-pack/docs/en/rest-api/rollup/rollup-index-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-index-caps.asciidoc @@ -26,15 +26,13 @@ This API will allow you to determine: `index`:: (string) Index or index-pattern of concrete rollup indices to check for capabilities. - - ==== Request Body There is no request body for the Get Jobs API. ==== Authorization -You must have `monitor`, `monitor_rollup`, `manage` or `manage_rollup` cluster privileges to use this API. +You must have the `read` index privilege on the index that stores the rollup results. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. 
diff --git a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc b/docs/reference/rollup/apis/rollup-job-config.asciidoc
similarity index 80%
rename from x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc
rename to docs/reference/rollup/apis/rollup-job-config.asciidoc
index ef0ea6f00f7ce..3a917fb59f214 100644
--- a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc
+++ b/docs/reference/rollup/apis/rollup-job-config.asciidoc
@@ -1,4 +1,5 @@
[role="xpack"]
+[testenv="basic"]
[[rollup-job-config]]
=== Rollup Job Configuration
@@ -23,7 +24,7 @@ PUT _xpack/rollup/job/sensor
"groups" : {
"date_histogram": {
"field": "timestamp",
- "interval": "1h",
+ "interval": "60m",
"delay": "7d"
},
"terms": {
@@ -82,6 +83,12 @@ In the above example, there are several pieces of logistical configuration for t
will tend to execute faster, but will require more memory during processing. This has no effect on how the data is rolled up, it is merely used for tweaking the speed/memory cost of the indexer.
+[NOTE]
+The `index_pattern` cannot be a pattern that would also match the destination `rollup_index`. E.g. the pattern
+`"foo-*"` would match the rollup index `"foo-rollup"`. This causes problems because the rollup job would attempt
+to roll up its own data at runtime. If you attempt to configure a pattern that matches the `rollup_index`, an exception
+will be thrown to prevent this behavior.
+
[[rollup-groups-config]]
==== Grouping Config
@@ -93,7 +100,7 @@ fields will then be available later for aggregating into buckets. For example,
"groups" : {
"date_histogram": {
"field": "timestamp",
- "interval": "1h",
+ "interval": "60m",
"delay": "7d"
},
"terms": {
@@ -127,9 +134,9 @@ The `date_histogram` group has several parameters:
The date field that is to be rolled up.
`interval` (required)::
- The interval of time buckets to be generated when rolling up. E.g. `"1h"` will produce hourly rollups. This follows standard time formatting
- syntax as used elsewhere in Elasticsearch. The `interval` defines the _minimum_ interval that can be aggregated only. If hourly (`"1h"`)
- intervals are configured, <> can execute aggregations with 1hr or greater (weekly, monthly, etc) intervals.
+ The interval of time buckets to be generated when rolling up. E.g. `"60m"` will produce 60-minute (hourly) rollups. This follows standard time formatting
+ syntax as used elsewhere in Elasticsearch. The `interval` defines only the _minimum_ interval that can be aggregated. If hourly (`"60m"`)
+ intervals are configured, <> can execute aggregations with 60m or greater (weekly, monthly, etc) intervals.
So define the interval as the smallest unit that you wish to later query. Note: smaller, more granular intervals take up proportionally more space.
@@ -148,6 +155,46 @@ The `date_histogram` group has several parameters:
to be stored with a specific timezone. By default, rollup documents are stored in `UTC`, but this can be changed with the `time_zone` parameter.
+.Calendar vs Fixed time intervals
+**********************************
+Elasticsearch understands both "calendar" and "fixed" time intervals. Fixed time intervals are fairly easy to understand;
+`"60s"` means sixty seconds. But what does `"1M"` mean? One month of time depends on which month we are talking about;
+some months are longer or shorter than others. This is an example of "calendar" time, and the duration of that unit
+depends on context. Calendar units are also affected by leap-seconds, leap-years, etc.
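+
+For example, the following illustrative snippet (taken from the job configuration
+shown above) requests fixed sixty-minute buckets; writing `"interval": "1h"`
+instead would request a calendar hour:
+
+[source,js]
+--------------------------------------------------
+"date_histogram": {
+  "field": "timestamp",
+  "interval": "60m",
+  "delay": "7d"
+}
+--------------------------------------------------
+// NOTCONSOLE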
+
+This is important because the buckets generated by Rollup will be in either calendar or fixed intervals, and will limit
+how you can query them later (see <>).
+
+We recommend sticking with "fixed" time intervals, since they are easier to understand and are more flexible at query
+time. They will introduce some drift in your data during leap-events, and you will have to think about months in a fixed
+quantity (30 days) instead of the actual calendar length... but it is often easier than dealing with calendar units
+at query time.
+
+Multiples of units are always "fixed" (e.g. `"2h"` is always the fixed quantity `7200` seconds). Single units can be
+fixed or calendar depending on the unit:
+
+[options="header"]
+|=======
+|Unit |Calendar |Fixed
+|millisecond |NA |`1ms`, `10ms`, etc
+|second |NA |`1s`, `10s`, etc
+|minute |`1m` |`2m`, `10m`, etc
+|hour |`1h` |`2h`, `10h`, etc
+|day |`1d` |`2d`, `10d`, etc
+|week |`1w` |NA
+|month |`1M` |NA
+|quarter |`1q` |NA
+|year |`1y` |NA
+|=======
+
+For units where both fixed and calendar intervals exist, you may need to express the quantity in terms of the next
+smaller unit. For example, if you want a fixed day (not a calendar day), you should specify `24h` instead of `1d`.
+Similarly, if you want fixed hours, specify `60m` instead of `1h`. This is because the single quantity entails
+calendar time, and limits you to querying by calendar time in the future.
+
+
+**********************************
+
===== Terms
The `terms` group can be used on `keyword` or numeric fields, to allow bucketing via the `terms` aggregation at a later point. The `terms`
diff --git a/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc b/docs/reference/rollup/apis/rollup-search.asciidoc
similarity index 95%
rename from x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc
rename to docs/reference/rollup/apis/rollup-search.asciidoc
index 470cbc4eaf57d..8e7fc69a00a6b 100644
--- a/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc
+++ b/docs/reference/rollup/apis/rollup-search.asciidoc
@@ -1,4 +1,5 @@
[role="xpack"]
+[testenv="basic"]
[[rollup-search]]
=== Rollup Search
++++
@@ -34,7 +35,7 @@ or using `_all`, is not permitted
The request body supports a subset of features from the regular Search API. It supports:
-- `query` param for specifying an DSL query, subject to some limitations
+- `query` param for specifying a DSL query, subject to some limitations (see <> and <>)
- `aggregations` param for specifying aggregations
Functionality that is not available:
@@ -101,6 +102,7 @@ GET /sensor_rollup/_rollup_search
--------------------------------------------------
// CONSOLE
// TEST[setup:sensor_prefab_data]
+// TEST[s/_rollup_search/_rollup_search?filter_path=took,timed_out,terminated_early,_shards,hits,aggregations/]
The query is targeting the `sensor_rollup` data, since this contains the rollup data as configured in the job.
A `max` aggregation has been used on the `temperature` field, yielding the following response: @@ -194,6 +196,7 @@ GET sensor-1,sensor_rollup/_rollup_search <1> -------------------------------------------------- // CONSOLE // TEST[continued] +// TEST[s/_rollup_search/_rollup_search?filter_path=took,timed_out,terminated_early,_shards,hits,aggregations/] <1> Note the URI now searches `sensor-1` and `sensor_rollup` at the same time When the search is executed, the Rollup Search endpoint will do two things: diff --git a/x-pack/docs/en/rest-api/rollup/start-job.asciidoc b/docs/reference/rollup/apis/start-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/start-job.asciidoc rename to docs/reference/rollup/apis/start-job.asciidoc index 9a0a0a7e4f01c..cf44883895c4c 100644 --- a/x-pack/docs/en/rest-api/rollup/start-job.asciidoc +++ b/docs/reference/rollup/apis/start-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-start-job]] === Start Job API ++++ diff --git a/x-pack/docs/en/rest-api/rollup/stop-job.asciidoc b/docs/reference/rollup/apis/stop-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/stop-job.asciidoc rename to docs/reference/rollup/apis/stop-job.asciidoc index 6050740270503..5912b2d688b70 100644 --- a/x-pack/docs/en/rest-api/rollup/stop-job.asciidoc +++ b/docs/reference/rollup/apis/stop-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-stop-job]] === Stop Job API ++++ diff --git a/x-pack/docs/en/rollup/index.asciidoc b/docs/reference/rollup/index.asciidoc similarity index 97% rename from x-pack/docs/en/rollup/index.asciidoc rename to docs/reference/rollup/index.asciidoc index 9ac89341bfe99..64dc233f82f6e 100644 --- a/x-pack/docs/en/rollup/index.asciidoc +++ b/docs/reference/rollup/index.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[xpack-rollup]] = Rolling up historical data diff --git a/x-pack/docs/en/rollup/overview.asciidoc b/docs/reference/rollup/overview.asciidoc similarity index 98% rename from x-pack/docs/en/rollup/overview.asciidoc rename to docs/reference/rollup/overview.asciidoc index a3f29f23bd107..b2570f647e72b 100644 --- a/x-pack/docs/en/rollup/overview.asciidoc +++ b/docs/reference/rollup/overview.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-overview]] == Overview @@ -20,6 +22,7 @@ So while the cost of storing a millisecond of sensor data from ten years ago is reading often diminishes with time. It's not useless -- it could easily contribute to a useful analysis -- but it's reduced value often leads to deletion rather than paying the fixed storage cost. +[float] === Rollup store historical data at reduced granularity That's where Rollup comes into play. The Rollup functionality summarizes old, high-granularity data into a reduced @@ -35,6 +38,7 @@ automates this process of summarizing historical data. Details about setting up and configuring Rollup are covered in <> +[float] === Rollup uses standard query DSL The Rollup feature exposes a new search endpoint (`/_rollup_search` vs the standard `/_search`) which knows how to search @@ -48,6 +52,7 @@ are covered more in <>. But if your queries, aggregations and dashboards only use the available functionality, redirecting them to historical data is trivial. +[float] === Rollup merges "live" and "rolled" data A useful feature of Rollup is the ability to query both "live", realtime data in addition to historical "rolled" data @@ -61,6 +66,7 @@ would only see data older than a month. 
The RollupSearch endpoint, however, sup It will take the results from both data sources and merge them together. If there is overlap between the "live" and "rolled" data, live data is preferred to increase accuracy. +[float] === Rollup is multi-interval aware Finally, Rollup is capable of intelligently utilizing the best interval available. If you've worked with summarizing diff --git a/x-pack/docs/en/rollup/rollup-agg-limitations.asciidoc b/docs/reference/rollup/rollup-agg-limitations.asciidoc similarity index 94% rename from x-pack/docs/en/rollup/rollup-agg-limitations.asciidoc rename to docs/reference/rollup/rollup-agg-limitations.asciidoc index cd20622d93c8d..9f8b6f66adeeb 100644 --- a/x-pack/docs/en/rollup/rollup-agg-limitations.asciidoc +++ b/docs/reference/rollup/rollup-agg-limitations.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-agg-limitations]] == Rollup Aggregation Limitations diff --git a/x-pack/docs/en/rest-api/rollup-api.asciidoc b/docs/reference/rollup/rollup-api.asciidoc similarity index 54% rename from x-pack/docs/en/rest-api/rollup-api.asciidoc rename to docs/reference/rollup/rollup-api.asciidoc index f1cd7c285a733..099686fb4329d 100644 --- a/x-pack/docs/en/rest-api/rollup-api.asciidoc +++ b/docs/reference/rollup/rollup-api.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-apis]] == Rollup APIs @@ -16,6 +17,7 @@ === Data * <> +* <> [float] [[rollup-search-endpoint]] @@ -25,11 +27,12 @@ -include::rollup/delete-job.asciidoc[] -include::rollup/get-job.asciidoc[] -include::rollup/put-job.asciidoc[] -include::rollup/start-job.asciidoc[] -include::rollup/stop-job.asciidoc[] -include::rollup/rollup-caps.asciidoc[] -include::rollup/rollup-search.asciidoc[] -include::rollup/rollup-job-config.asciidoc[] \ No newline at end of file +include::apis/delete-job.asciidoc[] +include::apis/get-job.asciidoc[] +include::apis/put-job.asciidoc[] +include::apis/start-job.asciidoc[] +include::apis/stop-job.asciidoc[] +include::apis/rollup-caps.asciidoc[] +include::apis/rollup-index-caps.asciidoc[] +include::apis/rollup-search.asciidoc[] +include::apis/rollup-job-config.asciidoc[] diff --git a/x-pack/docs/en/rollup/rollup-getting-started.asciidoc b/docs/reference/rollup/rollup-getting-started.asciidoc similarity index 83% rename from x-pack/docs/en/rollup/rollup-getting-started.asciidoc rename to docs/reference/rollup/rollup-getting-started.asciidoc index 24f68dddd8101..8f99bc2c010ce 100644 --- a/x-pack/docs/en/rollup/rollup-getting-started.asciidoc +++ b/docs/reference/rollup/rollup-getting-started.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-getting-started]] == Getting Started @@ -37,8 +39,7 @@ PUT _xpack/rollup/job/sensor "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", - "delay": "7d" + "interval": "60m" }, "terms": { "fields": ["node"] @@ -66,7 +67,7 @@ The `cron` parameter controls when and how often the job activates. When a roll from where it left off after the last activation. So if you configure the cron to run every 30 seconds, the job will process the last 30 seconds worth of data that was indexed into the `sensor-*` indices. -If instead the cron was configured to run once a day at midnight, the job would process the last 24hours worth of data. The choice is largely +If instead the cron was configured to run once a day at midnight, the job would process the last 24 hours worth of data. 
The choice is largely preference, based on how "realtime" you want the rollups, and if you wish to process continuously or move it to off-peak hours. Next, we define a set of `groups` and `metrics`. The metrics are fairly straightforward: we want to save the min/max/sum of the `temperature` @@ -79,7 +80,7 @@ It also allows us to run terms aggregations on the `node` field. .Date histogram interval vs cron schedule ********************************** You'll note that the job's cron is configured to run every 30 seconds, but the date_histogram is configured to -rollup at hourly intervals. How do these relate? +rollup at 60 minute intervals. How do these relate? The date_histogram controls the granularity of the saved data. Data will be rolled up into hourly intervals, and you will be unable to query with finer granularity. The cron simply controls when the process looks for new data to rollup. Every 30 seconds it will see @@ -223,70 +224,71 @@ Which returns a corresponding response: [source,js] ---- { - "took" : 93, - "timed_out" : false, - "terminated_early" : false, - "_shards" : ... , - "hits" : { - "total" : 0, - "max_score" : 0.0, - "hits" : [ ] - }, - "aggregations" : { - "timeline" : { - "meta" : { }, - "buckets" : [ - { - "key_as_string" : "2018-01-18T00:00:00.000Z", - "key" : 1516233600000, - "doc_count" : 6, - "nodes" : { - "doc_count_error_upper_bound" : 0, - "sum_other_doc_count" : 0, - "buckets" : [ - { - "key" : "a", - "doc_count" : 2, - "max_temperature" : { - "value" : 202.0 - }, - "avg_voltage" : { - "value" : 5.1499998569488525 - } - }, - { - "key" : "b", - "doc_count" : 2, - "max_temperature" : { - "value" : 201.0 - }, - "avg_voltage" : { - "value" : 5.700000047683716 - } - }, - { - "key" : "c", - "doc_count" : 2, - "max_temperature" : { - "value" : 202.0 - }, - "avg_voltage" : { - "value" : 4.099999904632568 - } - } - ] - } - } - ] - } - } + "took" : 93, + "timed_out" : false, + "terminated_early" : false, + "_shards" : ... , + "hits" : { + "total" : 0, + "max_score" : 0.0, + "hits" : [ ] + }, + "aggregations" : { + "timeline" : { + "meta" : { }, + "buckets" : [ + { + "key_as_string" : "2018-01-18T00:00:00.000Z", + "key" : 1516233600000, + "doc_count" : 6, + "nodes" : { + "doc_count_error_upper_bound" : 0, + "sum_other_doc_count" : 0, + "buckets" : [ + { + "key" : "a", + "doc_count" : 2, + "max_temperature" : { + "value" : 202.0 + }, + "avg_voltage" : { + "value" : 5.1499998569488525 + } + }, + { + "key" : "b", + "doc_count" : 2, + "max_temperature" : { + "value" : 201.0 + }, + "avg_voltage" : { + "value" : 5.700000047683716 + } + }, + { + "key" : "c", + "doc_count" : 2, + "max_temperature" : { + "value" : 202.0 + }, + "avg_voltage" : { + "value" : 4.099999904632568 + } + } + ] + } + } + ] + } + } } + ---- // TESTRESPONSE[s/"took" : 93/"took" : $body.$_path/] // TESTRESPONSE[s/"_shards" : \.\.\. /"_shards" : $body.$_path/] In addition to being more complicated (date histogram and a terms aggregation, plus an additional average metric), you'll notice -the date_histogram uses a `7d` interval instead of `1h`. +the date_histogram uses a `7d` interval instead of `60m`. 
[float] === Conclusion diff --git a/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc b/docs/reference/rollup/rollup-search-limitations.asciidoc similarity index 80% rename from x-pack/docs/en/rollup/rollup-search-limitations.asciidoc rename to docs/reference/rollup/rollup-search-limitations.asciidoc index 57ba23eebccbe..43feeab9a2eef 100644 --- a/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc +++ b/docs/reference/rollup/rollup-search-limitations.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-search-limitations]] == Rollup Search Limitations @@ -80,9 +82,25 @@ The response will tell you that the field and aggregation were not possible, bec [float] === Interval Granularity -Rollups are stored at a certain granularity, as defined by the `date_histogram` group in the configuration. If data is rolled up at hourly -intervals, the <> API can aggregate on any time interval hourly or greater. Intervals that are less than an hour will throw -an exception, since the data simply doesn't exist for finer granularities. +Rollups are stored at a certain granularity, as defined by the `date_histogram` group in the configuration. This means you +can only search/aggregate the rollup data with an interval that is greater-than or equal to the configured rollup interval. + +For example, if data is rolled up at hourly intervals, the <> API can aggregate on any time interval +hourly or greater. Intervals that are less than an hour will throw an exception, since the data simply doesn't +exist for finer granularities. + +[[rollup-search-limitations-intervals]] +.Requests must be multiples of the config +********************************** +Perhaps not immediately apparent, but the interval specified in an aggregation request must be a whole +multiple of the configured interval. If the job was configured to rollup on `3d` intervals, you can only +query and aggregate on multiples of three (`3d`, `6d`, `9d`, etc). + +A non-multiple wouldn't work, since the rolled up data wouldn't cleanly "overlap" with the buckets generated +by the aggregation, leading to incorrect results. + +For that reason, an error is thrown if a whole multiple of the configured interval isn't found. +********************************** Because the RollupSearch endpoint can "upsample" intervals, there is no need to configure jobs with multiple intervals (hourly, daily, etc). It's recommended to just configure a single job with the smallest granularity that is needed, and allow the search endpoint to upsample diff --git a/x-pack/docs/en/rollup/understanding-groups.asciidoc b/docs/reference/rollup/understanding-groups.asciidoc similarity index 99% rename from x-pack/docs/en/rollup/understanding-groups.asciidoc rename to docs/reference/rollup/understanding-groups.asciidoc index 803555b2d73f7..6321ab9b00f53 100644 --- a/x-pack/docs/en/rollup/understanding-groups.asciidoc +++ b/docs/reference/rollup/understanding-groups.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-understanding-groups]] == Understanding Groups diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc index c68cf0daaf55a..8771915dee696 100644 --- a/docs/reference/search/multi-search.asciidoc +++ b/docs/reference/search/multi-search.asciidoc @@ -86,6 +86,16 @@ The msearch's `max_concurrent_searches` request parameter can be used to control the maximum number of concurrent searches the multi search api will execute. 
This default is based on the number of data nodes and the default search thread pool size.
+The request parameter `max_concurrent_shard_requests` can be used to control the
+maximum number of concurrent shard requests that each sub search request will execute.
+This parameter should be used to protect a single request from overloading a cluster
+(e.g., a default request will hit all indices in a cluster, which could cause shard request rejections
+if the number of shards per node is high). This default is based on the number of
+data nodes in the cluster but at most `256`. In certain scenarios parallelism isn't achieved through
+concurrent requests, so this protection can result in poor performance. For
+instance, in an environment where only a very low number of concurrent search requests is expected,
+it might help to increase this value to a higher number.
+
[float]
[[msearch-security]]
=== Security
diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc
index 2a51d705d83ec..ad24d9c93c6b6 100644
--- a/docs/reference/search/request-body.asciidoc
+++ b/docs/reference/search/request-body.asciidoc
@@ -90,7 +90,8 @@ And here is a sample response:
Set to `false` to return an overall failure if the request would produce partial results. Defaults to true, which will allow partial results in the case of timeouts
- or partial failures.
+ or partial failures. This default can be controlled using the cluster-level setting
+ `search.default_allow_partial_results`.
`terminate_after`::
@@ -160,7 +161,7 @@ be set to `true` in the response.
},
"hits": {
"total": 1,
- "max_score": 0.0,
+ "max_score": null,
"hits": []
}
}
diff --git a/docs/reference/search/request/collapse.asciidoc b/docs/reference/search/request/collapse.asciidoc
index 192495e5d6d0d..1ab79e36c7e9d 100644
--- a/docs/reference/search/request/collapse.asciidoc
+++ b/docs/reference/search/request/collapse.asciidoc
@@ -217,4 +217,4 @@ Response:
--------------------------------------------------
// NOTCONSOLE
-NOTE: Second level of of collapsing doesn't allow `inner_hits`.
\ No newline at end of file
+NOTE: Second level of collapsing doesn't allow `inner_hits`.
\ No newline at end of file
diff --git a/docs/reference/search/request/docvalue-fields.asciidoc b/docs/reference/search/request/docvalue-fields.asciidoc
index fa5baf1db2262..bcfcb20d1d53b 100644
--- a/docs/reference/search/request/docvalue-fields.asciidoc
+++ b/docs/reference/search/request/docvalue-fields.asciidoc
@@ -30,6 +30,27 @@ GET /_search
Doc value fields can work on fields that are not stored.
+`*` can be used as a wildcard, for example:
+
+[source,js]
+--------------------------------------------------
+GET /_search
+{
+    "query" : {
+        "match_all": {}
+    },
+    "docvalue_fields" : [
+        {
+            "field": "*field", <1>
+            "format": "use_field_mapping" <2>
+        }
+    ]
+}
+--------------------------------------------------
+// CONSOLE
+<1> Match all fields ending with `field`
+<2> Format to be applied to all matching fields.
+
Note that if the fields parameter specifies fields without docvalues it will try to load the value from the fielddata cache causing the terms for that field to be loaded to memory (cached), which will result in more memory consumption.
diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 9f9833bde9d5c..c52f28bc7bea4 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -258,7 +258,7 @@ Which should look like: }, "hits": { "total" : 0, - "max_score" : 0.0, + "max_score" : null, "hits" : [] }, "suggest": { diff --git a/docs/reference/search/uri-request.asciidoc b/docs/reference/search/uri-request.asciidoc index a90f32bb3cd36..279bc0c0384c1 100644 --- a/docs/reference/search/uri-request.asciidoc +++ b/docs/reference/search/uri-request.asciidoc @@ -125,5 +125,6 @@ more details on the different types of search that can be performed. |`allow_partial_search_results` |Set to `false` to return an overall failure if the request would produce partial results. Defaults to true, which will allow partial results in the case of timeouts -or partial failures.. +or partial failures. This default can be controlled using the cluster-level setting +`search.default_allow_partial_results`. |======================================================================= diff --git a/docs/reference/settings/security-hash-settings.asciidoc b/docs/reference/settings/security-hash-settings.asciidoc new file mode 100644 index 0000000000000..061ca38d545c7 --- /dev/null +++ b/docs/reference/settings/security-hash-settings.asciidoc @@ -0,0 +1,84 @@ +[float] +[[hashing-settings]] +==== User cache and password hash algorithms + +Certain realms store user credentials in memory. To limit exposure +to credential theft and mitigate credential compromise, the cache only stores +a hashed version of the user credentials in memory. By default, the user cache +is hashed with a salted `sha-256` hash algorithm. You can use a different +hashing algorithm by setting the `cache.hash_algo` realm settings to any of the +following values: + +[[cache-hash-algo]] +.Cache hash algorithms +|======================= +| Algorithm | | | Description +| `ssha256` | | | Uses a salted `sha-256` algorithm (default). +| `md5` | | | Uses `MD5` algorithm. +| `sha1` | | | Uses `SHA1` algorithm. +| `bcrypt` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds. +| `bcrypt4` | | | Uses `bcrypt` algorithm with salt generated in 16 rounds. +| `bcrypt5` | | | Uses `bcrypt` algorithm with salt generated in 32 rounds. +| `bcrypt6` | | | Uses `bcrypt` algorithm with salt generated in 64 rounds. +| `bcrypt7` | | | Uses `bcrypt` algorithm with salt generated in 128 rounds. +| `bcrypt8` | | | Uses `bcrypt` algorithm with salt generated in 256 rounds. +| `bcrypt9` | | | Uses `bcrypt` algorithm with salt generated in 512 rounds. +| `pbkdf2` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 10000 iterations. +| `pbkdf2_1000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 1000 iterations. +| `pbkdf2_10000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 10000 iterations. +| `pbkdf2_50000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 50000 iterations. +| `pbkdf2_100000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 100000 iterations. +| `pbkdf2_500000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 500000 iterations. 
+| `pbkdf2_1000000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 1000000 iterations. +| `noop`,`clear_text` | | | Doesn't hash the credentials and keeps it in clear text in + memory. CAUTION: keeping clear text is considered insecure + and can be compromised at the OS level (for example through + memory dumps and using `ptrace`). +|======================= + +Likewise, realms that store passwords hash them using cryptographically strong +and password-specific salt values. You can configure the algorithm for password +hashing by setting the `xpack.security.authc.password_hashing.algorithm` setting +to one of the following: + +[[password-hashing-algorithms]] +.Password hashing algorithms +|======================= +| Algorithm | | | Description + +| `bcrypt` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds. (default) +| `bcrypt4` | | | Uses `bcrypt` algorithm with salt generated in 16 rounds. +| `bcrypt5` | | | Uses `bcrypt` algorithm with salt generated in 32 rounds. +| `bcrypt6` | | | Uses `bcrypt` algorithm with salt generated in 64 rounds. +| `bcrypt7` | | | Uses `bcrypt` algorithm with salt generated in 128 rounds. +| `bcrypt8` | | | Uses `bcrypt` algorithm with salt generated in 256 rounds. +| `bcrypt9` | | | Uses `bcrypt` algorithm with salt generated in 512 rounds. +| `bcrypt10` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds. +| `bcrypt11` | | | Uses `bcrypt` algorithm with salt generated in 2048 rounds. +| `bcrypt12` | | | Uses `bcrypt` algorithm with salt generated in 4096 rounds. +| `bcrypt13` | | | Uses `bcrypt` algorithm with salt generated in 8192 rounds. +| `bcrypt14` | | | Uses `bcrypt` algorithm with salt generated in 16384 rounds. +| `pbkdf2` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 10000 iterations. +| `pbkdf2_1000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 1000 iterations. +| `pbkdf2_10000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 10000 iterations. +| `pbkdf2_50000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 50000 iterations. +| `pbkdf2_100000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 100000 iterations. +| `pbkdf2_500000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 500000 iterations. +| `pbkdf2_1000000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 1000000 iterations. +|======================= + + diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 4f29b0549b3a8..1fc441a06226d 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -46,12 +46,21 @@ settings for the ad1 realm: `xpack.security.authc.realms.ad1.*`. The API already omits all `ssl` settings, `bind_dn`, and `bind_password` due to the sensitive nature of the information. +`xpack.security.fips_mode.enabled`:: +Enables fips mode of operation. Set this to `true` if you run this {es} instance in a FIPS 140-2 enabled JVM. For more information, see <>. Defaults to `false`. 
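+
+For example, FIPS mode can be enabled with the following (illustrative) entry in
+`elasticsearch.yml`:
+
+[source,yaml]
+--------------------------------------------------
+xpack.security.fips_mode.enabled: true
+--------------------------------------------------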
+ [float] [[password-security-settings]] ==== Default password security settings `xpack.security.authc.accept_default_password`:: In `elasticsearch.yml`, set this to `false` to disable support for the default "changeme" password. +[[password-hashing-settings]] +==== Password hashing settings +`xpack.security.authc.password_hashing.algorithm`:: +Specifies the hashing algorithm that is used for secure user credential storage. +See <>. Defaults to `bcrypt`. + [float] [[anonymous-access-settings]] ==== Anonymous access settings @@ -164,9 +173,8 @@ the standard {es} <>. Defaults to `20m`. cache at any given time. Defaults to 100,000. `cache.hash_algo`:: (Expert Setting) The hashing algorithm that is used for the -in-memory cached user credentials. For possible values, see -{xpack-ref}/controlling-user-cache.html[Cache hash algorithms]. Defaults to -`ssha256`. +in-memory cached user credentials. For possible values, see <>. +Defaults to `ssha256`. [[ref-users-settings]] @@ -190,8 +198,7 @@ Defaults to 100,000. `cache.hash_algo`:: (Expert Setting) The hashing algorithm that is used for the in-memory cached -user credentials. See the {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms] table for -all possible values. Defaults to `ssha256`. +user credentials. See <>. Defaults to `ssha256`. [[ref-ldap-settings]] [float] @@ -239,6 +246,13 @@ This setting is multivalued; you can specify multiple user contexts. Required to operate in user template mode. If `user_search.base_dn` is specified, this setting is not valid. For more information on the different modes, see {xpack-ref}/ldap-realm.html[LDAP realms]. + +`authorization_realms`:: +The names of the realms that should be consulted for delegate authorization. +If this setting is used, then the LDAP realm does not perform role mapping and +instead loads the user from the listed realms. The referenced realms are +consulted in the order that they are defined in this list. +See {stack-ov}/realm-chains.html#authorization_realms[Delegating authorization to another realm] + -- NOTE: If any settings starting with `user_search` are specified, the @@ -327,7 +341,7 @@ the filter. If not set, the user DN is passed into the filter. Defaults to Empt `unmapped_groups_as_roles`:: If set to `true`, the names of any unmapped LDAP groups are used as role names and assigned to the user. A group is considered to be _unmapped_ if it is not -not referenced in a +referenced in a {xpack-ref}/mapping-roles.html#mapping-roles-file[role-mapping file]. API-based role mappings are not considered. Defaults to `false`. @@ -444,8 +458,7 @@ Defaults to `100000`. `cache.hash_algo`:: (Expert Setting) Specifies the hashing algorithm that is used for the -in-memory cached user credentials. See {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms] -table for all possible values. Defaults to `ssha256`. +in-memory cached user credentials. See <>. Defaults to `ssha256`. [[ref-ad-settings]] [float] @@ -473,7 +486,7 @@ this setting controls the amount of time to cache DNS lookups. Defaults to `1h`. `domain_name`:: -The domain name of Active Directory. If the the `url` and `user_search_dn` +The domain name of Active Directory. If the `url` and the `user_search_dn` settings are not specified, the cluster can derive those values from this setting. Required. @@ -684,7 +697,7 @@ Defaults to `100000`. 
`cache.hash_algo`:: (Expert Setting) Specifies the hashing algorithm that is used for -the in-memory cached user credentials (see {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms] table for all possible values). Defaults to `ssha256`. +the in-memory cached user credentials. See <>. Defaults to `ssha256`. `follow_referrals`:: If set to `true` {security} follows referrals returned by the LDAP server. @@ -727,6 +740,12 @@ Specifies the {xpack-ref}/security-files.html[location] of the {xpack-ref}/mapping-roles.html[YAML role mapping configuration file]. Defaults to `ES_PATH_CONF/role_mapping.yml`. +`authorization_realms`:: +The names of the realms that should be consulted for delegate authorization. +If this setting is used, then the PKI realm does not perform role mapping and +instead loads the user from the listed realms. +See {stack-ov}/realm-chains.html#authorization_realms[Delegating authorization to another realm] + `cache.ttl`:: Specifies the time-to-live for cached user entries. A user and a hash of its credentials are cached for this period of time. Use the @@ -850,11 +869,26 @@ Defaults to `false`. Specifies whether to populate the {es} user's metadata with the values that are provided by the SAML attributes. Defaults to `true`. +`authorization_realms`:: +The names of the realms that should be consulted for delegate authorization. +If this setting is used, then the SAML realm does not perform role mapping and +instead loads the user from the listed realms. +See {stack-ov}/realm-chains.html#authorization_realms[Delegating authorization to another realm] + `allowed_clock_skew`:: The maximum amount of skew that can be tolerated between the IdP's clock and the {es} node's clock. Defaults to `3m` (3 minutes). +`req_authn_context_class_ref`:: +A comma separated list of Authentication Context Class Reference values to be +included in the Requested Authentication Context when requesting the IdP to +authenticate the current user. The Authentication Context of the corresponding +authentication response should contain at least one of the requested values. ++ +For more information, see +{stack-ov}/saml-guide-authentication.html#req-authn-context[Requesting specific authentication methods]. + [float] [[ref-saml-signing-settings]] ===== SAML realm signing settings @@ -1056,6 +1090,33 @@ Specifies the supported protocols for TLS/SSL. Specifies the cipher suites that should be supported. +[float] +[[ref-kerberos-settings]] +===== Kerberos realm settings + +For a Kerberos realm, the `type` must be set to `kerberos`. In addition to the +<>, you can specify +the following settings: + +`keytab.path`:: Specifies the path to the Kerberos keytab file that contains the +service principal used by this {es} node. This must be a location within the +{es} configuration directory and the file must have read permissions. Required. + +`remove_realm_name`:: Set to `true` to remove the realm part of principal names. +Principal names in Kerberos have the form `user/instance@REALM`. If this option +is `true`, the realm part (`@REALM`) will not be included in the username. +Defaults to `false`. + +`krb.debug`:: Set to `true` to enable debug logs for the Java login module that +provides support for Kerberos authentication. Defaults to `false`. + +`cache.ttl`:: The time-to-live for cached user entries. A user is cached for +this period of time. Specify the time period using the standard {es} +<>. Defaults to `20m`. 
+ +`cache.max_users`:: The maximum number of user entries that can live in the +cache at any given time. Defaults to 100,000. + [float] [[load-balancing]] ===== Load balancing and failover @@ -1094,7 +1155,12 @@ settings such as those for HTTP or Transport. `xpack.ssl.supported_protocols`:: Supported protocols with versions. Valid protocols: `SSLv2Hello`, `SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`, -`TLSv1`. +`TLSv1`. ++ +-- +NOTE: If `xpack.security.fips_mode.enabled` is `true`, you cannot use `SSLv2Hello` +or `SSLv3`. See <>. +-- `xpack.ssl.client_authentication`:: Controls the server's behavior in regard to requesting a certificate @@ -1193,6 +1259,9 @@ Password to the truststore. `xpack.ssl.truststore.secure_password` (<>):: Password to the truststore. +WARNING: If `xpack.security.fips_mode.enabled` is `true`, you cannot use Java +keystore files. See <>. + [float] ===== PKCS#12 files @@ -1231,6 +1300,9 @@ Password to the truststore. `xpack.ssl.truststore.secure_password` (<>):: Password to the truststore. +WARNING: If `xpack.security.fips_mode.enabled` is `true`, you cannot use PKCS#12 +keystore files. See <>. + [[pkcs12-truststore-note]] [NOTE] Storing trusted certificates in a PKCS#12 file, although supported, is @@ -1308,3 +1380,5 @@ List of IP addresses to allow for this profile. `transport.profiles.$PROFILE.xpack.security.filter.deny`:: List of IP addresses to deny for this profile. + +include::security-hash-settings.asciidoc[] \ No newline at end of file diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index a8b8dd82d6172..f0e5cfc71c999 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -155,6 +155,11 @@ the kernel allows a process to have at least 262,144 memory-mapped areas and is enforced on Linux only. To pass the maximum map count check, you must configure `vm.max_map_count` via `sysctl` to be at least `262144`. +Alternatively, the maximum map count check is only needed if you are using +`mmapfs` as the <> for your indices. If you +<> the use of `mmapfs` then this bootstrap check will +not be enforced. + === Client JVM check There are two different JVMs provided by OpenJDK-derived JVMs: the diff --git a/docs/reference/setup/important-settings/heap-dump-path.asciidoc b/docs/reference/setup/important-settings/heap-dump-path.asciidoc index b0d301b21d0b8..fb8c7ff35f0d0 100644 --- a/docs/reference/setup/important-settings/heap-dump-path.asciidoc +++ b/docs/reference/setup/important-settings/heap-dump-path.asciidoc @@ -8,8 +8,8 @@ distributions, and the `data` directory under the root of the Elasticsearch installation for the <> archive distributions). If this path is not suitable for receiving heap dumps, you should modify the entry `-XX:HeapDumpPath=...` in -<>. If you specify a fixed filename instead -of a directory, the JVM will repeatedly use the same file; this is one -mechanism for preventing heap dumps from accumulating in the heap dump -path. Alternatively, you can configure a scheduled task via your OS to -remove heap dumps that are older than a configured age. +<>. If you specify a directory, the JVM +will generate a filename for the heap dump based on the PID of the running +instance. If you specify a fixed filename instead of a directory, the file must +not exist when the JVM needs to perform a heap dump on an out of memory +exception, otherwise the heap dump will fail. 
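+
+For example, either form can be set in `jvm.options` (the paths below are
+illustrative):
+
+[source,txt]
+--------------------------------------------------
+# heap dumps are written into this directory, with a file name derived from the PID
+-XX:HeapDumpPath=/var/lib/elasticsearch
+
+# or use a fixed file name, which must not already exist
+-XX:HeapDumpPath=/var/lib/elasticsearch/heap-dump.hprof
+--------------------------------------------------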
diff --git a/docs/reference/setup/important-settings/network-host.asciidoc b/docs/reference/setup/important-settings/network-host.asciidoc index 7e29e73123d8d..1788bfebc66b5 100644 --- a/docs/reference/setup/important-settings/network-host.asciidoc +++ b/docs/reference/setup/important-settings/network-host.asciidoc @@ -9,7 +9,7 @@ location on a single node. This can be useful for testing Elasticsearch's ability to form clusters, but it is not a configuration recommended for production. -In order to communicate and to form a cluster with nodes on other servers, your +In order to form a cluster with nodes on other servers, your node will need to bind to a non-loopback address. While there are many <>, usually all you need to configure is `network.host`: diff --git a/docs/reference/setup/install.asciidoc b/docs/reference/setup/install.asciidoc index c0ebfb60fa7b2..26a207824af01 100644 --- a/docs/reference/setup/install.asciidoc +++ b/docs/reference/setup/install.asciidoc @@ -41,6 +41,8 @@ Elasticsearch website or from our RPM repository. `msi`:: +beta[] ++ The `msi` package is suitable for installation on Windows 64-bit systems with at least .NET 4.5 framework installed, and is the easiest choice for getting started with Elasticsearch on Windows. MSIs may be downloaded from the Elasticsearch website. diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index f5e248598ca9f..f2e9077e20ed1 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -47,21 +47,21 @@ aside panel with additional information for each input: image::images/msi_installer/msi_installer_help.png[] Within the first screen, select the directory for the installation. In addition, select directories for where -data, logs and configuration will reside or <>: +data, logs and configuration will be placed or <>: [[msi-installer-locations]] image::images/msi_installer/msi_installer_locations.png[] Then select whether to install as a service or start Elasticsearch manually as needed. When -installing as a service, you can also decide which account to run the service under as well -as whether the service should be started after installation and when Windows is started or -restarted: +installing as a service, you can also configure the Windows account to run the service with, +whether the service should be started after installation and the Windows startup behaviour: [[msi-installer-service]] image::images/msi_installer/msi_installer_service.png[] -IMPORTANT: When selecting an account to run the service with, be sure that the chosen account -has sufficient privileges to access the installation and other deployment directories chosen. +IMPORTANT: When selecting a Windows account to run the service with, be sure that the chosen account +has sufficient privileges to access the installation and other deployment directories chosen. Also +ensure the account is able to run Windows services. 
Common configuration settings are exposed within the Configuration section, allowing the cluster name, node name and roles to be set, in addition to memory and network settings: @@ -69,28 +69,26 @@ name, node name and roles to be set, in addition to memory and network settings: [[msi-installer-configuration]] image::images/msi_installer/msi_installer_configuration.png[] -A list of common plugins that can be downloaded and installed as -part of the installation, with the option to configure a HTTPS proxy through which to download: +A list of common plugins that can be downloaded and installed as part of the installation, with the option to configure a HTTPS proxy through which to download these plugins. + +TIP: Ensure the installation machine has access to the internet and that any corporate firewalls in place are configured to allow downloads from `artifacts.elastic.co`: [[msi-installer-selected-plugins]] image::images/msi_installer/msi_installer_selected_plugins.png[] -Upon choosing to install {xpack} plugin, an additional step allows a choice of the type of {xpack} -license to install, in addition to {security} configuration and built-in user configuration: +As of version 6.3.0, X-Pack is now https://www.elastic.co/products/x-pack/open[bundled by default]. The final step allows a choice of the type of X-Pack license to install, in addition to security configuration and built-in user configuration: [[msi-installer-xpack]] image::images/msi_installer/msi_installer_xpack.png[] -NOTE: {xpack} includes a choice of a Trial or Basic license for 30 days. After that, you can obtain one of the -https://www.elastic.co/subscriptions[available subscriptions] or {ref}/security-settings.html[disable Security]. -The Basic license is free and includes the https://www.elastic.co/products/x-pack/monitoring[Monitoring] extension. +NOTE: X-Pack includes a choice of a Trial or Basic license. A Trial license is valid for 30 days, after which you can obtain one of the available subscriptions. The Basic license is free and perpetual. Consult the https://www.elastic.co/subscriptions[available subscriptions] for further details on which features are available under which license. -After clicking the install button, the installer will begin installation: +After clicking the install button, the installation will begin: [[msi-installer-installing]] image::images/msi_installer/msi_installer_installing.png[] -and will indicate when it has been successfully installed: +...and will indicate when it has been successfully installed: [[msi-installer-success]] image::images/msi_installer/msi_installer_success.png[] @@ -107,7 +105,7 @@ then running: msiexec.exe /i elasticsearch-{version}.msi /qn -------------------------------------------- -By default, msiexec does not wait for the installation process to complete, since it runs in the +By default, `msiexec.exe` does not wait for the installation process to complete, since it runs in the Windows subsystem. To wait on the process to finish and ensure that `%ERRORLEVEL%` is set accordingly, it is recommended to use `start /wait` to create a process and wait for it to exit @@ -132,13 +130,13 @@ Supported Windows Installer command line arguments can be viewed using msiexec.exe /help -------------------------------------------- -or by consulting the https://msdn.microsoft.com/en-us/library/windows/desktop/aa367988(v=vs.85).aspx[Windows Installer SDK Command-Line Options]. 
+...or by consulting the https://msdn.microsoft.com/en-us/library/windows/desktop/aa367988(v=vs.85).aspx[Windows Installer SDK Command-Line Options]. [[msi-command-line-options]] ==== Command line options All settings exposed within the GUI are also available as command line arguments (referred to -as _properties_ within Windows Installer documentation) that can be passed to msiexec: +as _properties_ within Windows Installer documentation) that can be passed to `msiexec.exe`: [horizontal] `INSTALLDIR`:: @@ -282,47 +280,46 @@ as _properties_ within Windows Installer documentation) that can be passed to ms `XPACKLICENSE`:: - When installing {xpack} plugin, the type of license to install, - either `Basic` or `Trial`. Defaults to `Basic` + The type of X-Pack license to install, either `Basic` or `Trial`. Defaults to `Basic` `XPACKSECURITYENABLED`:: - When installing {xpack} plugin with a `Trial` license, whether {security} should be enabled. + When installing with a `Trial` license, whether X-Pack Security should be enabled. Defaults to `true` `BOOTSTRAPPASSWORD`:: - When installing {xpack} plugin with a `Trial` license and {security} enabled, the password to + When installing with a `Trial` license and X-Pack Security enabled, the password to used to bootstrap the cluster and persisted as the `bootstrap.password` setting in the keystore. Defaults to a randomized value. `SKIPSETTINGPASSWORDS`:: - When installing {xpack} plugin with a `Trial` license and {security} enabled, whether the + When installing with a `Trial` license and X-Pack Security enabled, whether the installation should skip setting up the built-in users `elastic`, `kibana` and `logstash_system`. Defaults to `false` `ELASTICUSERPASSWORD`:: - When installing {xpack} plugin with a `Trial` license and {security} enabled, the password + When installing with a `Trial` license and X-Pack Security enabled, the password to use for the built-in user `elastic`. Defaults to `""` `KIBANAUSERPASSWORD`:: - When installing {xpack} plugin with a `Trial` license and {security} enabled, the password + When installing with a `Trial` license and X-Pack Security enabled, the password to use for the built-in user `kibana`. Defaults to `""` `LOGSTASHSYSTEMUSERPASSWORD`:: - When installing {xpack} plugin with a `Trial` license and {security} enabled, the password + When installing with a `Trial` license and X-Pack Security enabled, the password to use for the built-in user `logstash_system`. Defaults to `""` To pass a value, simply append the property name and value using the format `=""` to -the installation command. For example, to use a different installation directory to the default one and to install https://www.elastic.co/products/x-pack[{xpack}]: +the installation command. For example, to use a different installation directory to the default one and to install https://www.elastic.co/products/x-pack[X-Pack]: ["source","sh",subs="attributes,callouts"] -------------------------------------------- -start /wait msiexec.exe /i elasticsearch-{version}.msi /qn INSTALLDIR="C:\Custom Install Directory" PLUGINS="x-pack" +start /wait msiexec.exe /i elasticsearch-{version}.msi /qn INSTALLDIR="C:\Custom Install Directory\{version}" PLUGINS="x-pack" -------------------------------------------- Consult the https://msdn.microsoft.com/en-us/library/windows/desktop/aa367988(v=vs.85).aspx[Windows Installer SDK Command-Line Options] @@ -330,9 +327,10 @@ for additional rules related to values containing quotation marks. 
ifdef::include-xpack[] [[msi-installer-enable-indices]] -==== Enable automatic creation of {xpack} indices +==== Enable automatic creation of X-Pack indices + -{xpack} will try to automatically create a number of indices within {es}. +X-Pack will try to automatically create a number of indices within Elasticsearch. include::xpack-indices.asciidoc[] endif::include-xpack[] @@ -344,7 +342,7 @@ include::msi-windows-start.asciidoc[] ==== Configuring Elasticsearch on the command line Elasticsearch loads its configuration from the `%ES_PATH_CONF%\elasticsearch.yml` -file by default. The format of this config file is explained in +file by default. The format of this config file is explained in <>. Any settings that can be specified in the config file can also be specified on @@ -393,10 +391,11 @@ with PowerShell: [source,powershell] -------------------------------------------- -Get-Service Elasticsearch | Stop-Service | Start-Service +Get-Service Elasticsearch | Stop-Service +Get-Service Elasticsearch | Start-Service -------------------------------------------- -Changes can be made to jvm.options and elasticsearch.yml configuration files to configure the +Changes can be made to `jvm.options` and `elasticsearch.yml` configuration files to configure the service after installation. Most changes (like JVM settings) will require a restart of the service in order to take effect. @@ -404,16 +403,16 @@ service in order to take effect. ==== Upgrade using the graphical user interface (GUI) The `.msi` package supports upgrading an installed version of Elasticsearch to a newer -version of Elasticsearch. The upgrade process through the GUI handles upgrading all +version. The upgrade process through the GUI handles upgrading all installed plugins as well as retaining both your data and configuration. -Downloading and clicking on a newer version of the `.msi` package will launch the GUI wizard. -The first step will list the read only properties from the previous installation: +Downloading and double-clicking on a newer version of the `.msi` package will launch the GUI wizard. +The first step will list the read-only properties from the previous installation: [[msi-installer-upgrade-notice]] image::images/msi_installer/msi_installer_upgrade_notice.png[] -The following configuration step allows certain configuration options to be changed: +The next step allows certain configuration options to be changed: [[msi-installer-upgrade-configuration]] image::images/msi_installer/msi_installer_upgrade_configuration.png[] @@ -434,11 +433,11 @@ The `.msi` can also upgrade Elasticsearch using the command line. A command line upgrade requires passing the **same** command line properties as used at first install time; the Windows Installer does not remember these properties. -For example, if you originally installed with the command line options `PLUGINS="x-pack"` and +For example, if you originally installed with the command line options `PLUGINS="ingest-geoip"` and `LOCKMEMORY="true"`, then you must pass these same values when performing an upgrade from the command line. -The **exception** to this is `INSTALLDIR` (if originally specified), which must be a different directory to the +The **exception** to this is the `INSTALLDIR` parameter (if originally specified), which must be a different directory to the current installation. If setting `INSTALLDIR`, the final directory in the path **must** be the version of Elasticsearch e.g. 
@@ -466,9 +465,8 @@ start /wait msiexec.exe /i elasticsearch-{version}.msi /qn /l upgrade.log The `.msi` package handles uninstallation of all directories and files added as part of installation. -WARNING: Uninstallation will remove **all** directories and their contents created as part of -installation, **including data within the data directory**. If you wish to retain your data upon -uninstallation, it is recommended that you make a copy of the data directory before uninstallation. +WARNING: Uninstallation will remove **all** contents created as part of +installation, **except for data, config or logs directories**. It is recommended that you make a copy of your data directory before upgrading or consider using the snapshot API. MSI installer packages do not provide a GUI for uninstallation. An installed program can be uninstalled by pressing the Windows key and typing `add or remove programs` to open the system settings. diff --git a/docs/reference/setup/secure-settings.asciidoc b/docs/reference/setup/secure-settings.asciidoc index 2177440457acf..6abf5dea14d0e 100644 --- a/docs/reference/setup/secure-settings.asciidoc +++ b/docs/reference/setup/secure-settings.asciidoc @@ -91,9 +91,6 @@ using the `bin/elasticsearch-keystore add` command, call: [source,js] ---- POST _nodes/reload_secure_settings -{ - "secure_settings_password": "" -} ---- // CONSOLE This API will decrypt and re-read the entire keystore, on every cluster node, diff --git a/docs/reference/sql/concepts.asciidoc b/docs/reference/sql/concepts.asciidoc index 1dc23e391fab1..6098ebe91578e 100644 --- a/docs/reference/sql/concepts.asciidoc +++ b/docs/reference/sql/concepts.asciidoc @@ -25,7 +25,7 @@ So let's start from the bottom; these roughly are: |`column` |`field` -|In both cases, at the lowest level, data is stored in in _named_ entries, of a variety of <>, containing _one_ value. SQL calls such an entry a _column_ while {es} a _field_. +|In both cases, at the lowest level, data is stored in _named_ entries, of a variety of <>, containing _one_ value. SQL calls such an entry a _column_ while {es} a _field_. Notice that in {es} a field can contain _multiple_ values of the same type (esentially a list) while in SQL, a _column_ can contain _exactly_ one value of said type. {es-sql} will do its best to preserve the SQL semantic and, depending on the query, reject those that return fields with more than one value. 
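To make the column-versus-field mapping in the SQL concepts change above concrete, a short sketch follows. The `library` index and `author` field are illustrative names only, not taken from this change:

[source,sql]
--------------------------------------------------
-- 'author' holds exactly one value per document, so it maps cleanly onto a column:
SELECT author FROM library;

-- A field that holds several values in the same document (essentially a list)
-- has no single-value column equivalent, so a query that returns it may be
-- rejected, as described in the concepts section above.
--------------------------------------------------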
diff --git a/docs/reference/sql/endpoints/cli.asciidoc b/docs/reference/sql/endpoints/cli.asciidoc index 0908c2344bb15..eef2fbfbf5969 100644 --- a/docs/reference/sql/endpoints/cli.asciidoc +++ b/docs/reference/sql/endpoints/cli.asciidoc @@ -22,6 +22,15 @@ the first parameter: $ ./bin/elasticsearch-sql-cli https://some.server:9200 -------------------------------------------------- +If security is enabled on your cluster, you can pass the username +and password in the form `username:password@host_name:port` +to the SQL CLI: + +[source,bash] +-------------------------------------------------- +$ ./bin/elasticsearch-sql-cli https://sql_user:strongpassword@some.server:9200 +-------------------------------------------------- + Once the CLI is running you can use any <> that Elasticsearch supports: diff --git a/docs/reference/sql/overview.asciidoc b/docs/reference/sql/overview.asciidoc index a72f5ca61feb5..3da070a447ad7 100644 --- a/docs/reference/sql/overview.asciidoc +++ b/docs/reference/sql/overview.asciidoc @@ -28,7 +28,7 @@ No need for additional hardware, processes, runtimes or libraries to query {es}; Lightweight and efficient:: -{es-sql} does not abstract {es} and its search capabilities - on the contrary, it embrases and exposes to SQL to allow proper full-text search, in real-time, in the same declarative, succint fashion. +{es-sql} does not abstract {es} and its search capabilities - on the contrary, it embraces and exposes SQL to allow proper full-text search, in real-time, in the same declarative, succint fashion. diff --git a/docs/reference/testing/testing-framework.asciidoc b/docs/reference/testing/testing-framework.asciidoc index dfc7371dd3756..321122d81f506 100644 --- a/docs/reference/testing/testing-framework.asciidoc +++ b/docs/reference/testing/testing-framework.asciidoc @@ -230,7 +230,7 @@ As many Elasticsearch tests are checking for a similar output, like the amount o `assertMatchCount()`:: Asserts a matching count from a percolation response `assertFirstHit()`:: Asserts the first hit hits the specified matcher `assertSecondHit()`:: Asserts the second hit hits the specified matcher -`assertThirdHit()`:: Asserts the third hits hits the specified matcher +`assertThirdHit()`:: Asserts the third hit hits the specified matcher `assertSearchHit()`:: Assert a certain element in a search response hits the specified matcher `assertNoFailures()`:: Asserts that no shard failures have occurred in the response `assertFailures()`:: Asserts that shard failures have happened during a search request diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc index aac8c192372c5..8157e51e5e0cd 100644 --- a/docs/resiliency/index.asciidoc +++ b/docs/resiliency/index.asciidoc @@ -459,7 +459,7 @@ Upgrading indices create with Lucene 3.x (Elasticsearch v0.20 and before) to Luc [float] === Improve error handling when deleting files (STATUS: DONE, v1.4.0.Beta1) -Lucene uses reference counting to prevent files that are still in use from being deleted. Lucene testing discovered a bug ({JIRA}5919[LUCENE-5919]) when decrementing the ref count on a batch of files. If deleting some of the files resulted in an exception (e.g. due to interference from a virus scanner), the files that had had their ref counts decremented successfully could later have their ref counts deleted again, incorrectly, resulting in files being physically deleted before their time. This is fixed in Lucene 4.10. +Lucene uses reference counting to prevent files that are still in use from being deleted. 
Lucene testing discovered a bug ({JIRA}5919[LUCENE-5919]) when decrementing the ref count on a batch of files. If deleting some of the files resulted in an exception (e.g. due to interference from a virus scanner), the files that had their ref counts decremented successfully could later have their ref counts deleted again, incorrectly, resulting in files being physically deleted before their time. This is fixed in Lucene 4.10. [float] === Using Lucene Checksums to verify shards during snapshot/restore (STATUS:DONE, v1.3.3) diff --git a/docs/src/test/cluster/config/scripts/calculate_score.painless b/docs/src/test/cluster/config/scripts/calculate_score.painless deleted file mode 100644 index 0fad3fc59f950..0000000000000 --- a/docs/src/test/cluster/config/scripts/calculate_score.painless +++ /dev/null @@ -1 +0,0 @@ -Math.log(_score * 2) + params.my_modifier diff --git a/docs/src/test/cluster/config/scripts/my_combine_script.painless b/docs/src/test/cluster/config/scripts/my_combine_script.painless deleted file mode 100644 index 106ef08d91fd3..0000000000000 --- a/docs/src/test/cluster/config/scripts/my_combine_script.painless +++ /dev/null @@ -1,5 +0,0 @@ -double profit = 0; -for (t in params._agg.transactions) { - profit += t -} -return profit diff --git a/docs/src/test/cluster/config/scripts/my_init_script.painless b/docs/src/test/cluster/config/scripts/my_init_script.painless deleted file mode 100644 index fb6aa11723cf1..0000000000000 --- a/docs/src/test/cluster/config/scripts/my_init_script.painless +++ /dev/null @@ -1 +0,0 @@ -params._agg.transactions = [] diff --git a/docs/src/test/cluster/config/scripts/my_map_script.painless b/docs/src/test/cluster/config/scripts/my_map_script.painless deleted file mode 100644 index f4700482d5585..0000000000000 --- a/docs/src/test/cluster/config/scripts/my_map_script.painless +++ /dev/null @@ -1 +0,0 @@ -params._agg.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value) diff --git a/docs/src/test/cluster/config/scripts/my_reduce_script.painless b/docs/src/test/cluster/config/scripts/my_reduce_script.painless deleted file mode 100644 index ca4f67ca2db01..0000000000000 --- a/docs/src/test/cluster/config/scripts/my_reduce_script.painless +++ /dev/null @@ -1,5 +0,0 @@ -double profit = 0; -for (a in params._aggs) { - profit += a -} -return profit diff --git a/docs/src/test/cluster/config/scripts/my_script.painless b/docs/src/test/cluster/config/scripts/my_script.painless deleted file mode 100644 index 55d0e99ed0f0c..0000000000000 --- a/docs/src/test/cluster/config/scripts/my_script.painless +++ /dev/null @@ -1,2 +0,0 @@ -// Simple script to load a field. Not really a good example, but a simple one. 
-doc[params.field].value diff --git a/gradle.properties b/gradle.properties index 08b03629ad53a..6b04e99c20441 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,2 +1,3 @@ org.gradle.daemon=false org.gradle.jvmargs=-Xmx2g +options.forkOptions.memoryMaximumSize=2g diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 0d4a9516871af..28861d273a5d2 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 94161917d1878..76d8f343e75cc 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-4.9-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-4.10-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=39e2d5803bbd5eaf6c8efe07067b0e5a00235e8c71318642b2ed262920b27721 +distributionSha256Sum=fc049dcbcb245d5892bebae143bd515a78f6a5a93cec99d489b312dc0ce4aad9 diff --git a/libs/cli/build.gradle b/libs/cli/build.gradle index 00d6d96ef0d59..b1f3b338255c4 100644 --- a/libs/cli/build.gradle +++ b/libs/cli/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.optional-base' apply plugin: 'nebula.maven-base-publish' @@ -34,5 +31,5 @@ test.enabled = false jarHell.enabled = false forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } diff --git a/libs/core/build.gradle b/libs/core/build.gradle index 2017c2a418ac4..9c90837bd80ed 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -48,12 +46,13 @@ if (!isEclipse && !isIdea) { targetCompatibility = 9 } - /* Enable this when forbiddenapis was updated to 2.6. 
- * See: https://github.com/elastic/elasticsearch/issues/29292 forbiddenApisJava9 { - targetCompatibility = 9 + if (project.runtimeJavaVersion < JavaVersion.VERSION_1_9) { + targetCompatibility = JavaVersion.VERSION_1_9 + javaHome = project.java9Home + } + replaceSignatureFiles 'jdk-signatures' } - */ jar { metaInf { @@ -91,7 +90,7 @@ dependencies { forbiddenApisMain { // :libs:core does not depend on server // TODO: Need to decide how we want to handle for forbidden signatures with the changes to server - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index e171daeb79b85..3de0ae5117e6a 100644 --- a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -255,6 +255,10 @@ public static void checkJavaVersion(String resource, String targetVersion) { } private static void checkClass(Map clazzes, String clazz, Path jarpath) { + if (clazz.equals("module-info") || clazz.endsWith(".module-info")) { + // Ignore jigsaw module descriptions + return; + } Path previous = clazzes.put(clazz, jarpath); if (previous != null) { if (previous.equals(jarpath)) { diff --git a/libs/core/src/main/java/org/elasticsearch/common/CharArrays.java b/libs/core/src/main/java/org/elasticsearch/common/CharArrays.java new file mode 100644 index 0000000000000..907874ca5735b --- /dev/null +++ b/libs/core/src/main/java/org/elasticsearch/common/CharArrays.java @@ -0,0 +1,150 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Objects; + +/** + * Helper class similar to Arrays to handle conversions for Char arrays + */ +public final class CharArrays { + + private CharArrays() {} + + /** + * Decodes the provided byte[] to a UTF-8 char[]. This is done while avoiding + * conversions to String. The provided byte[] is not modified by this method, so + * the caller needs to take care of clearing the value if it is sensitive. 
+ */ + public static char[] utf8BytesToChars(byte[] utf8Bytes) { + final ByteBuffer byteBuffer = ByteBuffer.wrap(utf8Bytes); + final CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer); + final char[] chars; + if (charBuffer.hasArray()) { + // there is no guarantee that the char buffers backing array is the right size + // so we need to make a copy + chars = Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit()); + Arrays.fill(charBuffer.array(), (char) 0); // clear sensitive data + } else { + final int length = charBuffer.limit() - charBuffer.position(); + chars = new char[length]; + charBuffer.get(chars); + // if the buffer is not read only we can reset and fill with 0's + if (charBuffer.isReadOnly() == false) { + charBuffer.clear(); // reset + for (int i = 0; i < charBuffer.limit(); i++) { + charBuffer.put((char) 0); + } + } + } + return chars; + } + + /** + * Encodes the provided char[] to a UTF-8 byte[]. This is done while avoiding + * conversions to String. The provided char[] is not modified by this method, so + * the caller needs to take care of clearing the value if it is sensitive. + */ + public static byte[] toUtf8Bytes(char[] chars) { + final CharBuffer charBuffer = CharBuffer.wrap(chars); + final ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(charBuffer); + final byte[] bytes; + if (byteBuffer.hasArray()) { + // there is no guarantee that the byte buffers backing array is the right size + // so we need to make a copy + bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit()); + Arrays.fill(byteBuffer.array(), (byte) 0); // clear sensitive data + } else { + final int length = byteBuffer.limit() - byteBuffer.position(); + bytes = new byte[length]; + byteBuffer.get(bytes); + // if the buffer is not read only we can reset and fill with 0's + if (byteBuffer.isReadOnly() == false) { + byteBuffer.clear(); // reset + for (int i = 0; i < byteBuffer.limit(); i++) { + byteBuffer.put((byte) 0); + } + } + } + return bytes; + } + + /** + * Tests if a char[] contains a sequence of characters that match the prefix. This is like + * {@link String#startsWith(String)} but does not require conversion of the char[] to a string. + */ + public static boolean charsBeginsWith(String prefix, char[] chars) { + if (chars == null || prefix == null) { + return false; + } + + if (prefix.length() > chars.length) { + return false; + } + + for (int i = 0; i < prefix.length(); i++) { + if (chars[i] != prefix.charAt(i)) { + return false; + } + } + + return true; + } + + /** + * Constant time equality check of char arrays to avoid potential timing attacks. + */ + public static boolean constantTimeEquals(char[] a, char[] b) { + Objects.requireNonNull(a, "char arrays must not be null for constantTimeEquals"); + Objects.requireNonNull(b, "char arrays must not be null for constantTimeEquals"); + if (a.length != b.length) { + return false; + } + + int equals = 0; + for (int i = 0; i < a.length; i++) { + equals |= a[i] ^ b[i]; + } + + return equals == 0; + } + + /** + * Constant time equality check of strings to avoid potential timing attacks. 
+ */ + public static boolean constantTimeEquals(String a, String b) { + Objects.requireNonNull(a, "strings must not be null for constantTimeEquals"); + Objects.requireNonNull(b, "strings must not be null for constantTimeEquals"); + if (a.length() != b.length()) { + return false; + } + + int equals = 0; + for (int i = 0; i < a.length(); i++) { + equals |= a.charAt(i) ^ b.charAt(i); + } + + return equals == 0; + } +} diff --git a/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java index e58268ef19251..95c56f94ee4e1 100644 --- a/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java +++ b/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java @@ -76,6 +76,28 @@ public void testDifferentJars() throws Exception { } } + public void testModuleInfo() throws Exception { + Path dir = createTempDir(); + JarHell.checkJarHell( + asSet( + makeJar(dir, "foo.jar", null, "module-info.class"), + makeJar(dir, "bar.jar", null, "module-info.class") + ), + logger::debug + ); + } + + public void testModuleInfoPackage() throws Exception { + Path dir = createTempDir(); + JarHell.checkJarHell( + asSet( + makeJar(dir, "foo.jar", null, "foo/bar/module-info.class"), + makeJar(dir, "bar.jar", null, "foo/bar/module-info.class") + ), + logger::debug + ); + } + public void testDirsOnClasspath() throws Exception { Path dir1 = createTempDir(); Path dir2 = createTempDir(); diff --git a/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java b/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java new file mode 100644 index 0000000000000..9283283ab0861 --- /dev/null +++ b/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common; + +import org.elasticsearch.test.ESTestCase; + +import java.nio.charset.StandardCharsets; + +public class CharArraysTests extends ESTestCase { + + public void testCharsToBytes() { + final String originalValue = randomUnicodeOfCodepointLengthBetween(0, 32); + final byte[] expectedBytes = originalValue.getBytes(StandardCharsets.UTF_8); + final char[] valueChars = originalValue.toCharArray(); + + final byte[] convertedBytes = CharArrays.toUtf8Bytes(valueChars); + assertArrayEquals(expectedBytes, convertedBytes); + } + + public void testBytesToUtf8Chars() { + final String originalValue = randomUnicodeOfCodepointLengthBetween(0, 32); + final byte[] bytes = originalValue.getBytes(StandardCharsets.UTF_8); + final char[] expectedChars = originalValue.toCharArray(); + + final char[] convertedChars = CharArrays.utf8BytesToChars(bytes); + assertArrayEquals(expectedChars, convertedChars); + } + + public void testCharsBeginsWith() { + assertFalse(CharArrays.charsBeginsWith(randomAlphaOfLength(4), null)); + assertFalse(CharArrays.charsBeginsWith(null, null)); + assertFalse(CharArrays.charsBeginsWith(null, randomAlphaOfLength(4).toCharArray())); + assertFalse(CharArrays.charsBeginsWith(randomAlphaOfLength(2), randomAlphaOfLengthBetween(3, 8).toCharArray())); + + final String prefix = randomAlphaOfLengthBetween(2, 4); + assertTrue(CharArrays.charsBeginsWith(prefix, prefix.toCharArray())); + final char[] prefixedValue = prefix.concat(randomAlphaOfLengthBetween(1, 12)).toCharArray(); + assertTrue(CharArrays.charsBeginsWith(prefix, prefixedValue)); + + final String modifiedPrefix = randomBoolean() ? prefix.substring(1) : prefix.substring(0, prefix.length() - 1); + char[] nonMatchingValue; + do { + nonMatchingValue = modifiedPrefix.concat(randomAlphaOfLengthBetween(0, 12)).toCharArray(); + } while (new String(nonMatchingValue).startsWith(prefix)); + assertFalse(CharArrays.charsBeginsWith(prefix, nonMatchingValue)); + assertTrue(CharArrays.charsBeginsWith(modifiedPrefix, nonMatchingValue)); + } + + public void testConstantTimeEquals() { + final String value = randomAlphaOfLengthBetween(0, 32); + assertTrue(CharArrays.constantTimeEquals(value, value)); + assertTrue(CharArrays.constantTimeEquals(value.toCharArray(), value.toCharArray())); + + final String other = randomAlphaOfLengthBetween(1, 32); + assertFalse(CharArrays.constantTimeEquals(value, other)); + assertFalse(CharArrays.constantTimeEquals(value.toCharArray(), other.toCharArray())); + } +} diff --git a/libs/dissect/build.gradle b/libs/dissect/build.gradle index c09a2a4ebd1b3..853c78646c25b 100644 --- a/libs/dissect/build.gradle +++ b/libs/dissect/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -33,7 +31,7 @@ dependencies { } forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index 61437be6aff13..37b494624eddb 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -34,7 +32,7 @@ dependencies { } forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/libs/nio/build.gradle b/libs/nio/build.gradle index 43c9a133a3f37..f6a6ff652450f 100644 --- a/libs/nio/build.gradle +++ b/libs/nio/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -62,5 +59,5 @@ if (isEclipse) { forbiddenApisMain { // nio does not depend on core, so only jdk signatures should be checked // es-all is not checked as we connect and accept sockets - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } diff --git a/libs/secure-sm/build.gradle b/libs/secure-sm/build.gradle index 93fdfd01c8f0c..3baf3513b1206 100644 --- a/libs/secure-sm/build.gradle +++ b/libs/secure-sm/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -47,7 +44,7 @@ dependencies { } forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/libs/x-content/build.gradle b/libs/x-content/build.gradle index c8b37108ff93c..0ec4e0d6ad312 100644 --- a/libs/x-content/build.gradle +++ b/libs/x-content/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -57,7 +55,7 @@ dependencies { forbiddenApisMain { // x-content does not depend on server // TODO: Need to decide how we want to handle for forbidden signatures with the changes to core - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java index 1d2b8a36810eb..b5dc23fbdb893 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java @@ -64,7 +64,7 @@ public void testNGramDeprecationWarning() throws IOException { public void testNGramNoDeprecationWarningPre6_4() throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(IndexMetaData.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_3_0)) + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_3_0)) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); @@ -104,7 +104,7 @@ public void testEdgeNGramDeprecationWarning() throws IOException { public void testEdgeNGramNoDeprecationWarningPre6_4() throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(IndexMetaData.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_3_0)) + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_3_0)) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java index 0d5389a6d6594..e284877978851 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java @@ -60,7 +60,7 @@ public void testDeprecationWarning() throws IOException { public void testNoDeprecationWarningPre6_3() throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(IndexMetaData.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_2_4)) + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_2_4)) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index 4f35bbee28dfc..1681258e7c7ee 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -26,6 +26,7 @@ esplugin { dependencies { compileOnly project(':modules:lang-painless') compile project(':libs:grok') + compile project(':libs:dissect') } compileJava.options.compilerArgs << "-Xlint:-unchecked,-rawtypes" diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DissectProcessor.java 
b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DissectProcessor.java new file mode 100644 index 0000000000000..58f04ccdd431f --- /dev/null +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DissectProcessor.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +import org.elasticsearch.dissect.DissectParser; +import org.elasticsearch.ingest.AbstractProcessor; +import org.elasticsearch.ingest.ConfigurationUtils; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; + +import java.util.Map; + +public final class DissectProcessor extends AbstractProcessor { + + public static final String TYPE = "dissect"; + //package private members for testing + final String field; + final boolean ignoreMissing; + final String pattern; + final String appendSeparator; + final DissectParser dissectParser; + + DissectProcessor(String tag, String field, String pattern, String appendSeparator, boolean ignoreMissing) { + super(tag); + this.field = field; + this.ignoreMissing = ignoreMissing; + this.pattern = pattern; + this.appendSeparator = appendSeparator; + this.dissectParser = new DissectParser(pattern, appendSeparator); + } + + @Override + public void execute(IngestDocument ingestDocument) { + String input = ingestDocument.getFieldValue(field, String.class, ignoreMissing); + if (input == null && ignoreMissing) { + return; + } else if (input == null) { + throw new IllegalArgumentException("field [" + field + "] is null, cannot process it."); + } + dissectParser.parse(input).forEach(ingestDocument::setFieldValue); + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory implements Processor.Factory { + + @Override + public DissectProcessor create(Map registry, String processorTag, Map config) { + String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); + String pattern = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "pattern"); + String appendSeparator = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "append_separator", ""); + boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); + return new DissectProcessor(processorTag, field, pattern, appendSeparator, ignoreMissing); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java index f5bf9cc959105..31c0ae8cc3dc8 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java +++ 
b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java @@ -28,6 +28,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import org.elasticsearch.script.ScriptService; import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationException; import static org.elasticsearch.ingest.ConfigurationUtils.readBooleanProperty; @@ -96,6 +97,13 @@ Processor getProcessor() { } public static final class Factory implements Processor.Factory { + + private final ScriptService scriptService; + + Factory(ScriptService scriptService) { + this.scriptService = scriptService; + } + @Override public ForEachProcessor create(Map factories, String tag, Map config) throws Exception { @@ -107,7 +115,8 @@ public ForEachProcessor create(Map factories, String throw newConfigurationException(TYPE, tag, "processor", "Must specify exactly one processor type"); } Map.Entry> entry = entries.iterator().next(); - Processor processor = ConfigurationUtils.readProcessor(factories, entry.getKey(), entry.getValue()); + Processor processor = + ConfigurationUtils.readProcessor(factories, scriptService, entry.getKey(), entry.getValue()); return new ForEachProcessor(tag, field, processor, ignoreMissing); } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java index bc475a2a00539..8b048282814ea 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java @@ -72,7 +72,7 @@ public Map getProcessors(Processor.Parameters paramet processors.put(ConvertProcessor.TYPE, new ConvertProcessor.Factory()); processors.put(GsubProcessor.TYPE, new GsubProcessor.Factory()); processors.put(FailProcessor.TYPE, new FailProcessor.Factory(parameters.scriptService)); - processors.put(ForEachProcessor.TYPE, new ForEachProcessor.Factory()); + processors.put(ForEachProcessor.TYPE, new ForEachProcessor.Factory(parameters.scriptService)); processors.put(DateIndexNameProcessor.TYPE, new DateIndexNameProcessor.Factory(parameters.scriptService)); processors.put(SortProcessor.TYPE, new SortProcessor.Factory()); processors.put(GrokProcessor.TYPE, new GrokProcessor.Factory(GROK_PATTERNS, createGrokThreadWatchdog(parameters))); @@ -82,6 +82,8 @@ public Map getProcessors(Processor.Parameters paramet processors.put(KeyValueProcessor.TYPE, new KeyValueProcessor.Factory()); processors.put(URLDecodeProcessor.TYPE, new URLDecodeProcessor.Factory()); processors.put(BytesProcessor.TYPE, new BytesProcessor.Factory()); + processors.put(PipelineProcessor.TYPE, new PipelineProcessor.Factory(parameters.ingestService)); + processors.put(DissectProcessor.TYPE, new DissectProcessor.Factory()); return Collections.unmodifiableMap(processors); } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/PipelineProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/PipelineProcessor.java new file mode 100644 index 0000000000000..77ffdb919193f --- /dev/null +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/PipelineProcessor.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +import java.util.Map; +import org.elasticsearch.ingest.AbstractProcessor; +import org.elasticsearch.ingest.ConfigurationUtils; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.ingest.Pipeline; +import org.elasticsearch.ingest.Processor; + +public class PipelineProcessor extends AbstractProcessor { + + public static final String TYPE = "pipeline"; + + private final String pipelineName; + + private final IngestService ingestService; + + private PipelineProcessor(String tag, String pipelineName, IngestService ingestService) { + super(tag); + this.pipelineName = pipelineName; + this.ingestService = ingestService; + } + + @Override + public void execute(IngestDocument ingestDocument) throws Exception { + Pipeline pipeline = ingestService.getPipeline(pipelineName); + if (pipeline == null) { + throw new IllegalStateException("Pipeline processor configured for non-existent pipeline [" + pipelineName + ']'); + } + ingestDocument.executePipeline(pipeline); + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory implements Processor.Factory { + + private final IngestService ingestService; + + public Factory(IngestService ingestService) { + this.ingestService = ingestService; + } + + @Override + public PipelineProcessor create(Map registry, String processorTag, + Map config) throws Exception { + String pipeline = + ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "pipeline"); + return new PipelineProcessor(processorTag, pipeline, ingestService); + } + } +} diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorFactoryTests.java new file mode 100644 index 0000000000000..ba1b2bd1eb576 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorFactoryTests.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.ingest.common; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.dissect.DissectException; +import org.elasticsearch.ingest.RandomDocumentPicks; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.is; + +public class DissectProcessorFactoryTests extends ESTestCase { + + public void testCreate() { + DissectProcessor.Factory factory = new DissectProcessor.Factory(); + String fieldName = RandomDocumentPicks.randomFieldName(random()); + String processorTag = randomAlphaOfLength(10); + String pattern = "%{a},%{b},%{c}"; + String appendSeparator = ":"; + + Map config = new HashMap<>(); + config.put("field", fieldName); + config.put("pattern", pattern); + config.put("append_separator", appendSeparator); + config.put("ignore_missing", true); + + DissectProcessor processor = factory.create(null, processorTag, config); + assertThat(processor.getTag(), equalTo(processorTag)); + assertThat(processor.field, equalTo(fieldName)); + assertThat(processor.pattern, equalTo(pattern)); + assertThat(processor.appendSeparator, equalTo(appendSeparator)); + assertThat(processor.dissectParser, is(notNullValue())); + assertThat(processor.ignoreMissing, is(true)); + } + + public void testCreateMissingField() { + DissectProcessor.Factory factory = new DissectProcessor.Factory(); + Map config = new HashMap<>(); + config.put("pattern", "%{a},%{b},%{c}"); + Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", config)); + assertThat(e.getMessage(), Matchers.equalTo("[field] required property is missing")); + } + + public void testCreateMissingPattern() { + DissectProcessor.Factory factory = new DissectProcessor.Factory(); + Map config = new HashMap<>(); + config.put("field", randomAlphaOfLength(10)); + Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", config)); + assertThat(e.getMessage(), Matchers.equalTo("[pattern] required property is missing")); + } + + public void testCreateMissingOptionals() { + DissectProcessor.Factory factory = new DissectProcessor.Factory(); + Map config = new HashMap<>(); + config.put("pattern", "%{a},%{b},%{c}"); + config.put("field", randomAlphaOfLength(10)); + DissectProcessor processor = factory.create(null, "_tag", config); + assertThat(processor.appendSeparator, equalTo("")); + assertThat(processor.ignoreMissing, is(false)); + } + + public void testCreateBadPattern() { + DissectProcessor.Factory factory = new DissectProcessor.Factory(); + Map config = new HashMap<>(); + config.put("pattern", "no keys defined"); + config.put("field", randomAlphaOfLength(10)); + expectThrows(DissectException.class, () -> factory.create(null, "_tag", config)); + } +} diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorTests.java new file mode 100644 index 0000000000000..bb5d26d01a865 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorTests.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.dissect.DissectException; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.ingest.RandomDocumentPicks; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.CoreMatchers; + +import java.util.Collections; +import java.util.HashMap; + +import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; +import static org.hamcrest.Matchers.equalTo; + +/** + * Basic tests for the {@link DissectProcessor}. See the {@link org.elasticsearch.dissect.DissectParser} test suite for a comprehensive + * set of dissect tests. + */ +public class DissectProcessorTests extends ESTestCase { + + public void testMatch() { + IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, + Collections.singletonMap("message", "foo,bar,baz")); + DissectProcessor dissectProcessor = new DissectProcessor("", "message", "%{a},%{b},%{c}", "", true); + dissectProcessor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue("a", String.class), equalTo("foo")); + assertThat(ingestDocument.getFieldValue("b", String.class), equalTo("bar")); + assertThat(ingestDocument.getFieldValue("c", String.class), equalTo("baz")); + } + + public void testMatchOverwrite() { + IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, + MapBuilder.newMapBuilder() + .put("message", "foo,bar,baz") + .put("a", "willgetstompped") + .map()); + assertThat(ingestDocument.getFieldValue("a", String.class), equalTo("willgetstompped")); + DissectProcessor dissectProcessor = new DissectProcessor("", "message", "%{a},%{b},%{c}", "", true); + dissectProcessor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue("a", String.class), equalTo("foo")); + assertThat(ingestDocument.getFieldValue("b", String.class), equalTo("bar")); + assertThat(ingestDocument.getFieldValue("c", String.class), equalTo("baz")); + } + + public void testAdvancedMatch() { + IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, + Collections.singletonMap("message", "foo bar,,,,,,,baz nope:notagain 😊 🐇 🙃")); + DissectProcessor dissectProcessor = + new DissectProcessor("", "message", "%{a->} %{*b->},%{&b} %{}:%{?skipme} %{+smile/2} 🐇 %{+smile/1}", "::::", true); + dissectProcessor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue("a", String.class), equalTo("foo")); + assertThat(ingestDocument.getFieldValue("bar", String.class), equalTo("baz")); + expectThrows(IllegalArgumentException.class, () -> ingestDocument.getFieldValue("nope", String.class)); + expectThrows(IllegalArgumentException.class, () -> 
ingestDocument.getFieldValue("notagain", String.class)); + assertThat(ingestDocument.getFieldValue("smile", String.class), equalTo("🙃::::😊")); + } + + public void testMiss() { + IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, + Collections.singletonMap("message", "foo:bar,baz")); + DissectProcessor dissectProcessor = new DissectProcessor("", "message", "%{a},%{b},%{c}", "", true); + DissectException e = expectThrows(DissectException.class, () -> dissectProcessor.execute(ingestDocument)); + assertThat(e.getMessage(), CoreMatchers.containsString("Unable to find match for dissect pattern")); + } + + public void testNonStringValueWithIgnoreMissing() { + String fieldName = RandomDocumentPicks.randomFieldName(random()); + Processor processor = new DissectProcessor("", fieldName, "%{a},%{b},%{c}", "", true); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + ingestDocument.setFieldValue(fieldName, randomInt()); + Exception e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat(e.getMessage(), equalTo("field [" + fieldName + "] of type [java.lang.Integer] cannot be cast to [java.lang.String]")); + } + + public void testNullValueWithIgnoreMissing() throws Exception { + String fieldName = RandomDocumentPicks.randomFieldName(random()); + Processor processor = new DissectProcessor("", fieldName, "%{a},%{b},%{c}", "", true); + IngestDocument originalIngestDocument = RandomDocumentPicks + .randomIngestDocument(random(), Collections.singletonMap(fieldName, null)); + IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); + processor.execute(ingestDocument); + assertIngestDocument(originalIngestDocument, ingestDocument); + } + + public void testNullValueWithOutIgnoreMissing() { + String fieldName = RandomDocumentPicks.randomFieldName(random()); + Processor processor = new DissectProcessor("", fieldName, "%{a},%{b},%{c}", "", false); + IngestDocument originalIngestDocument = RandomDocumentPicks + .randomIngestDocument(random(), Collections.singletonMap(fieldName, null)); + IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); + expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + } +} diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java index f382ad8dcfb6a..7ab19c4147eab 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.TestProcessor; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -30,14 +31,17 @@ import java.util.Map; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; public class ForEachProcessorFactoryTests extends ESTestCase { + private final ScriptService scriptService = mock(ScriptService.class); + public void testCreate() throws Exception { Processor processor = new TestProcessor(ingestDocument -> { }); Map registry = new HashMap<>(); registry.put("_name", (r, t, c) -> processor); - 
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -53,7 +57,7 @@ public void testSetIgnoreMissing() throws Exception { Processor processor = new TestProcessor(ingestDocument -> { }); Map registry = new HashMap<>(); registry.put("_name", (r, t, c) -> processor); - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -71,7 +75,7 @@ public void testCreateWithTooManyProcessorTypes() throws Exception { Map registry = new HashMap<>(); registry.put("_first", (r, t, c) -> processor); registry.put("_second", (r, t, c) -> processor); - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -84,7 +88,7 @@ public void testCreateWithTooManyProcessorTypes() throws Exception { } public void testCreateWithNonExistingProcessorType() throws Exception { - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); config.put("processor", Collections.singletonMap("_name", Collections.emptyMap())); @@ -97,7 +101,7 @@ public void testCreateWithMissingField() throws Exception { Processor processor = new TestProcessor(ingestDocument -> { }); Map registry = new HashMap<>(); registry.put("_name", (r, t, c) -> processor); - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("processor", Collections.singletonList(Collections.singletonMap("_name", Collections.emptyMap()))); Exception exception = expectThrows(Exception.class, () -> forEachFactory.create(registry, null, config)); @@ -105,7 +109,7 @@ public void testCreateWithMissingField() throws Exception { } public void testCreateWithMissingProcessor() { - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); Exception exception = expectThrows(Exception.class, () -> forEachFactory.create(Collections.emptyMap(), null, config)); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/PipelineProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/PipelineProcessorTests.java new file mode 100644 index 0000000000000..5baf3cf822d72 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/PipelineProcessorTests.java @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.ingest.common; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ingest.CompoundProcessor; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.ingest.Pipeline; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.ingest.RandomDocumentPicks; +import org.elasticsearch.test.ESTestCase; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class PipelineProcessorTests extends ESTestCase { + + public void testExecutesPipeline() throws Exception { + String pipelineId = "pipeline"; + IngestService ingestService = mock(IngestService.class); + CompletableFuture invoked = new CompletableFuture<>(); + IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + Pipeline pipeline = new Pipeline( + pipelineId, null, null, + new CompoundProcessor(new Processor() { + @Override + public void execute(final IngestDocument ingestDocument) throws Exception { + invoked.complete(ingestDocument); + } + + @Override + public String getType() { + return null; + } + + @Override + public String getTag() { + return null; + } + }) + ); + when(ingestService.getPipeline(pipelineId)).thenReturn(pipeline); + PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService); + Map config = new HashMap<>(); + config.put("pipeline", pipelineId); + factory.create(Collections.emptyMap(), null, config).execute(testIngestDocument); + assertEquals(testIngestDocument, invoked.get()); + } + + public void testThrowsOnMissingPipeline() throws Exception { + IngestService ingestService = mock(IngestService.class); + IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService); + Map config = new HashMap<>(); + config.put("pipeline", "missingPipelineId"); + IllegalStateException e = expectThrows( + IllegalStateException.class, + () -> factory.create(Collections.emptyMap(), null, config).execute(testIngestDocument) + ); + assertEquals( + "Pipeline processor configured for non-existent pipeline [missingPipelineId]", e.getMessage() + ); + } + + public void testThrowsOnRecursivePipelineInvocations() throws Exception { + String innerPipelineId = "inner"; + String outerPipelineId = "outer"; + IngestService ingestService = mock(IngestService.class); + IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + Map outerConfig = new HashMap<>(); + outerConfig.put("pipeline", innerPipelineId); + PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService); + Pipeline outer = new Pipeline( + outerPipelineId, null, null, + new CompoundProcessor(factory.create(Collections.emptyMap(), null, outerConfig)) + ); + Map innerConfig = new HashMap<>(); + 
innerConfig.put("pipeline", outerPipelineId); + Pipeline inner = new Pipeline( + innerPipelineId, null, null, + new CompoundProcessor(factory.create(Collections.emptyMap(), null, innerConfig)) + ); + when(ingestService.getPipeline(outerPipelineId)).thenReturn(outer); + when(ingestService.getPipeline(innerPipelineId)).thenReturn(inner); + outerConfig.put("pipeline", innerPipelineId); + ElasticsearchException e = expectThrows( + ElasticsearchException.class, + () -> factory.create(Collections.emptyMap(), null, outerConfig).execute(testIngestDocument) + ); + assertEquals( + "Recursive invocation of pipeline [inner] detected.", e.getRootCause().getMessage() + ); + } +} diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml index 86557946ac0dd..eb23b7840ee6a 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml @@ -10,9 +10,11 @@ - contains: { nodes.$master.modules: { name: ingest-common } } - contains: { nodes.$master.ingest.processors: { type: append } } + - contains: { nodes.$master.ingest.processors: { type: bytes } } - contains: { nodes.$master.ingest.processors: { type: convert } } - contains: { nodes.$master.ingest.processors: { type: date } } - contains: { nodes.$master.ingest.processors: { type: date_index_name } } + - contains: { nodes.$master.ingest.processors: { type: dissect } } - contains: { nodes.$master.ingest.processors: { type: dot_expander } } - contains: { nodes.$master.ingest.processors: { type: fail } } - contains: { nodes.$master.ingest.processors: { type: foreach } } @@ -30,4 +32,3 @@ - contains: { nodes.$master.ingest.processors: { type: split } } - contains: { nodes.$master.ingest.processors: { type: trim } } - contains: { nodes.$master.ingest.processors: { type: uppercase } } - - contains: { nodes.$master.ingest.processors: { type: bytes } } diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_dissect_processor.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_dissect_processor.yml new file mode 100644 index 0000000000000..1a7c2e593d43c --- /dev/null +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_dissect_processor.yml @@ -0,0 +1,90 @@ +--- +teardown: +- do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test dissect processor match": +- do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "dissect" : { + "field" : "message", + "pattern" : "%{a} %{b} %{c}" + } + } + ] + } +- match: { acknowledged: true } + +- do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline" + body: {message: "foo bar baz"} + +- do: + get: + index: test + type: test + id: 1 +- match: { _source.message: "foo bar baz" } +- match: { _source.a: "foo" } +- match: { _source.b: "bar" } +- match: { _source.c: "baz" } +--- +"Test dissect processor mismatch": +- do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "dissect" : { + "field" : "message", + "pattern" : "%{a},%{b},%{c}" + } + } + ] + } +- match: { acknowledged: true } + +- do: + catch: '/Unable to find match for dissect pattern: \%\{a\},\%\{b\},\%\{c\} against source: foo bar baz/' + index: + index: test + type: test + id: 2 + pipeline: "my_pipeline" + 
body: {message: "foo bar baz"} + +--- +"Test fail to create dissect processor": +- do: + catch: '/Unable to parse pattern/' + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "dissect" : { + "field" : "message", + "pattern" : "bad pattern" + } + } + ] + } + diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_conditional_processor.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_conditional_processor.yml new file mode 100644 index 0000000000000..532519c4ca073 --- /dev/null +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_conditional_processor.yml @@ -0,0 +1,81 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test conditional processor fulfilled condition": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "bytes" : { + "if" : "ctx.conditional_field == 'bar'", + "field" : "bytes_source_field", + "target_field" : "bytes_target_field" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline" + body: {bytes_source_field: "1kb", conditional_field: "bar"} + + - do: + get: + index: test + type: test + id: 1 + - match: { _source.bytes_source_field: "1kb" } + - match: { _source.conditional_field: "bar" } + - match: { _source.bytes_target_field: 1024 } + +--- +"Test conditional processor unfulfilled condition": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "bytes" : { + "if" : "ctx.conditional_field == 'foo'", + "field" : "bytes_source_field", + "target_field" : "bytes_target_field" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline" + body: {bytes_source_field: "1kb", conditional_field: "bar"} + + - do: + get: + index: test + type: test + id: 1 + - match: { _source.bytes_source_field: "1kb" } + - match: { _source.conditional_field: "bar" } + - is_false: _source.bytes_target_field + diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml new file mode 100644 index 0000000000000..355ba2d42104a --- /dev/null +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml @@ -0,0 +1,113 @@ +--- +teardown: +- do: + ingest.delete_pipeline: + id: "inner" + ignore: 404 + +- do: + ingest.delete_pipeline: + id: "outer" + ignore: 404 + +--- +"Test Pipeline Processor with Simple Inner Pipeline": +- do: + ingest.put_pipeline: + id: "inner" + body: > + { + "description" : "inner pipeline", + "processors" : [ + { + "set" : { + "field": "foo", + "value": "bar" + } + }, + { + "set" : { + "field": "baz", + "value": "blub" + } + } + ] + } +- match: { acknowledged: true } + +- do: + ingest.put_pipeline: + id: "outer" + body: > + { + "description" : "outer pipeline", + "processors" : [ + { + "pipeline" : { + "pipeline": "inner" + } + } + ] + } +- match: { acknowledged: true } + +- do: + index: + index: test + type: test + id: 1 + pipeline: "outer" + body: {} + +- do: + get: + index: test + type: test + id: 1 +- match: { _source.foo: "bar" } +- match: { _source.baz: "blub" } + +--- +"Test Pipeline Processor with Circular Pipelines": +- do: + 
ingest.put_pipeline: + id: "outer" + body: > + { + "description" : "outer pipeline", + "processors" : [ + { + "pipeline" : { + "pipeline": "inner" + } + } + ] + } +- match: { acknowledged: true } + +- do: + ingest.put_pipeline: + id: "inner" + body: > + { + "description" : "inner pipeline", + "processors" : [ + { + "pipeline" : { + "pipeline": "outer" + } + } + ] + } +- match: { acknowledged: true } + +- do: + catch: /illegal_state_exception/ + index: + index: test + type: test + id: 1 + pipeline: "outer" + body: {} +- match: { error.root_cause.0.type: "exception" } +- match: { error.root_cause.0.reason: "java.lang.IllegalArgumentException: java.lang.IllegalStateException: Recursive invocation of pipeline [inner] detected." } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java index caa9fa4831add..eea9e31d4a79d 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.script.mustache; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; @@ -120,21 +119,17 @@ public MultiSearchTemplateRequest indicesOptions(IndicesOptions indicesOptions) @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - maxConcurrentSearchRequests = in.readVInt(); - } + maxConcurrentSearchRequests = in.readVInt(); requests = in.readStreamableList(SearchTemplateRequest::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeVInt(maxConcurrentSearchRequests); - } + out.writeVInt(maxConcurrentSearchRequests); out.writeStreamableList(requests); } - + @Override public boolean equals(Object o) { if (this == o) return true; @@ -148,9 +143,9 @@ public boolean equals(Object o) { @Override public int hashCode() { return Objects.hash(maxConcurrentSearchRequests, requests, indicesOptions); - } - - public static byte[] writeMultiLineFormat(MultiSearchTemplateRequest multiSearchTemplateRequest, + } + + public static byte[] writeMultiLineFormat(MultiSearchTemplateRequest multiSearchTemplateRequest, XContent xContent) throws IOException { ByteArrayOutputStream output = new ByteArrayOutputStream(); for (SearchTemplateRequest templateRequest : multiSearchTemplateRequest.requests()) { @@ -168,5 +163,5 @@ public static byte[] writeMultiLineFormat(MultiSearchTemplateRequest multiSearch } return output.toByteArray(); } - + } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateWithoutContentIT.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateWithoutContentIT.java index cbc6adf6be227..023d3b246761d 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateWithoutContentIT.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateWithoutContentIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.script.mustache; +import org.elasticsearch.client.Request; import 
org.elasticsearch.client.ResponseException; import org.elasticsearch.test.rest.ESRestTestCase; @@ -30,14 +31,14 @@ public class SearchTemplateWithoutContentIT extends ESRestTestCase { public void testSearchTemplateMissingBody() throws IOException { ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest( - randomBoolean() ? "POST" : "GET", "/_search/template")); + new Request(randomBoolean() ? "POST" : "GET", "/_search/template"))); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); assertThat(responseException.getMessage(), containsString("request body or source parameter is required")); } public void testMultiSearchTemplateMissingBody() throws IOException { ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest( - randomBoolean() ? "POST" : "GET", "/_msearch/template")); + new Request(randomBoolean() ? "POST" : "GET", "/_msearch/template"))); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); assertThat(responseException.getMessage(), containsString("request body or source parameter is required")); } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java index c38325edd1424..7acbff6cb0b93 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java @@ -61,9 +61,12 @@ public final class Whitelist { /** The {@link List} of all the whitelisted Painless classes. */ public final List<WhitelistClass> whitelistClasses; + public final List<WhitelistBinding> whitelistBindings; + /** Standard constructor. All values must be not {@code null}. */ - public Whitelist(ClassLoader classLoader, List<WhitelistClass> whitelistClasses) { + public Whitelist(ClassLoader classLoader, List<WhitelistClass> whitelistClasses, List<WhitelistBinding> whitelistBindings) { this.classLoader = Objects.requireNonNull(classLoader); this.whitelistClasses = Collections.unmodifiableList(Objects.requireNonNull(whitelistClasses)); + this.whitelistBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistBindings)); } } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistBinding.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistBinding.java new file mode 100644 index 0000000000000..364dbbb09ca9b --- /dev/null +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistBinding.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.painless.spi; + +import java.util.List; +import java.util.Objects; + +/** + * A binding represents a method call that stores state. Each binding class must have exactly one + * public constructor and one public method excluding those inherited directly from {@link Object}. + * The canonical type name parameters provided must match those of the constructor and method combined. + * The constructor for a binding class will be called when the binding method is called for the first + * time at which point state may be stored for the arguments passed into the constructor. The method + * for a binding class will be called each time the binding method is called and may use the previously + * stored state. + */ +public class WhitelistBinding { + + /** Information about where this constructor was whitelisted from. */ + public final String origin; + + /** The Java class name this binding represents. */ + public final String targetJavaClassName; + + /** The method name for this binding. */ + public final String methodName; + + /** + * The canonical type name for the return type. + */ + public final String returnCanonicalTypeName; + + /** + * A {@link List} of {@link String}s that are the Painless type names for the parameters of the + * constructor which can be used to look up the Java constructor through reflection. + */ + public final List<String> canonicalTypeNameParameters; + + /** Standard constructor. All values must be not {@code null}. */ + public WhitelistBinding(String origin, String targetJavaClassName, + String methodName, String returnCanonicalTypeName, List<String> canonicalTypeNameParameters) { + + this.origin = Objects.requireNonNull(origin); + this.targetJavaClassName = Objects.requireNonNull(targetJavaClassName); + + this.methodName = Objects.requireNonNull(methodName); + this.returnCanonicalTypeName = Objects.requireNonNull(returnCanonicalTypeName); + this.canonicalTypeNameParameters = Objects.requireNonNull(canonicalTypeNameParameters); + } +} diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java index 0b216ae5c2953..7b3eb75aa3ecd 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java @@ -62,9 +62,8 @@ public final class WhitelistClass { /** Standard constructor. All values must be not {@code null}. */ public WhitelistClass(String origin, String javaClassName, boolean noImport, - List<WhitelistConstructor> whitelistConstructors, - List<WhitelistMethod> whitelistMethods, - List<WhitelistField> whitelistFields) { + List<WhitelistConstructor> whitelistConstructors, List<WhitelistMethod> whitelistMethods, List<WhitelistField> whitelistFields) + { this.origin = Objects.requireNonNull(origin); this.javaClassName = Objects.requireNonNull(javaClassName); diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java index a4a0076626a9c..0279c82f1b67b 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java @@ -133,6 +133,7 @@ public final class WhitelistLoader { */ public static Whitelist loadFromResourceFiles(Class<?> resource, String...
filepaths) { List whitelistClasses = new ArrayList<>(); + List whitelistBindings = new ArrayList<>(); // Execute a single pass through the whitelist text files. This will gather all the // constructors, methods, augmented methods, and fields for each whitelisted class. @@ -141,8 +142,9 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep int number = -1; try (LineNumberReader reader = new LineNumberReader( - new InputStreamReader(resource.getResourceAsStream(filepath), StandardCharsets.UTF_8))) { + new InputStreamReader(resource.getResourceAsStream(filepath), StandardCharsets.UTF_8))) { + String parseType = null; String whitelistClassOrigin = null; String javaClassName = null; boolean noImport = false; @@ -165,7 +167,11 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep // Ensure the final token of the line is '{'. if (line.endsWith("{") == false) { throw new IllegalArgumentException( - "invalid class definition: failed to parse class opening bracket [" + line + "]"); + "invalid class definition: failed to parse class opening bracket [" + line + "]"); + } + + if (parseType != null) { + throw new IllegalArgumentException("invalid definition: cannot embed class definition [" + line + "]"); } // Parse the Java class name. @@ -178,6 +184,7 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep throw new IllegalArgumentException("invalid class definition: failed to parse class name [" + line + "]"); } + parseType = "class"; whitelistClassOrigin = "[" + filepath + "]:[" + number + "]"; javaClassName = tokens[0]; @@ -185,34 +192,117 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep whitelistConstructors = new ArrayList<>(); whitelistMethods = new ArrayList<>(); whitelistFields = new ArrayList<>(); + } else if (line.startsWith("static ")) { + // Ensure the final token of the line is '{'. + if (line.endsWith("{") == false) { + throw new IllegalArgumentException( + "invalid static definition: failed to parse static opening bracket [" + line + "]"); + } - // Handle the end of a class, by creating a new WhitelistClass with all the previously gathered - // constructors, methods, augmented methods, and fields, and adding it to the list of whitelisted classes. + if (parseType != null) { + throw new IllegalArgumentException("invalid definition: cannot embed static definition [" + line + "]"); + } + + parseType = "static"; + + // Handle the end of a definition and reset all previously gathered values. // Expects the following format: '}' '\n' } else if (line.equals("}")) { - if (javaClassName == null) { - throw new IllegalArgumentException("invalid class definition: extraneous closing bracket"); + if (parseType == null) { + throw new IllegalArgumentException("invalid definition: extraneous closing bracket"); } - whitelistClasses.add(new WhitelistClass(whitelistClassOrigin, javaClassName, noImport, - whitelistConstructors, whitelistMethods, whitelistFields)); + // Create a new WhitelistClass with all the previously gathered constructors, methods, + // augmented methods, and fields, and add it to the list of whitelisted classes. 
+ if ("class".equals(parseType)) { + whitelistClasses.add(new WhitelistClass(whitelistClassOrigin, javaClassName, noImport, + whitelistConstructors, whitelistMethods, whitelistFields)); + + whitelistClassOrigin = null; + javaClassName = null; + noImport = false; + whitelistConstructors = null; + whitelistMethods = null; + whitelistFields = null; + } - // Set all the variables to null to ensure a new class definition is found before other parsable values. - whitelistClassOrigin = null; - javaClassName = null; - noImport = false; - whitelistConstructors = null; - whitelistMethods = null; - whitelistFields = null; + // Reset the parseType. + parseType = null; - // Handle all other valid cases. - } else { + // Handle static definition types. + // Expects the following format: ID ID '(' ( ID ( ',' ID )* )? ')' 'bound_to' ID '\n' + } else if ("static".equals(parseType)) { + // Mark the origin of this parsable object. + String origin = "[" + filepath + "]:[" + number + "]"; + + // Parse the tokens prior to the method parameters. + int parameterStartIndex = line.indexOf('('); + + if (parameterStartIndex == -1) { + throw new IllegalArgumentException( + "illegal static definition: start of method parameters not found [" + line + "]"); + } + + String[] tokens = line.substring(0, parameterStartIndex).trim().split("\\s+"); + + String methodName; + + // Based on the number of tokens, look up the Java method name. + if (tokens.length == 2) { + methodName = tokens[1]; + } else { + throw new IllegalArgumentException("invalid method definition: unexpected format [" + line + "]"); + } + + String returnCanonicalTypeName = tokens[0]; + + // Parse the method parameters. + int parameterEndIndex = line.indexOf(')'); + + if (parameterEndIndex == -1) { + throw new IllegalArgumentException( + "illegal static definition: end of method parameters not found [" + line + "]"); + } + + String[] canonicalTypeNameParameters = + line.substring(parameterStartIndex + 1, parameterEndIndex).replaceAll("\\s+", "").split(","); + + // Handle the case for a method with no parameters. + if ("".equals(canonicalTypeNameParameters[0])) { + canonicalTypeNameParameters = new String[0]; + } + + // Parse the static type and class. + tokens = line.substring(parameterEndIndex + 1).trim().split("\\s+"); + + String staticType; + String targetJavaClassName; + + // Based on the number of tokens, look up the type and class. + if (tokens.length == 2) { + staticType = tokens[0]; + targetJavaClassName = tokens[1]; + } else { + throw new IllegalArgumentException("invalid static definition: unexpected format [" + line + "]"); + } + + // Check the static type is valid. + if ("bound_to".equals(staticType) == false) { + throw new IllegalArgumentException( + "invalid static definition: unexpected static type [" + staticType + "] [" + line + "]"); + } + + whitelistBindings.add(new WhitelistBinding(origin, targetJavaClassName, + methodName, returnCanonicalTypeName, Arrays.asList(canonicalTypeNameParameters))); + + // Handle class definition types. + } else if ("class".equals(parseType)) { // Mark the origin of this parsable object. String origin = "[" + filepath + "]:[" + number + "]"; // Ensure we have a defined class before adding any constructors, methods, augmented methods, or fields. 
- if (javaClassName == null) { - throw new IllegalArgumentException("invalid object definition: expected a class name [" + line + "]"); + if (parseType == null) { + throw new IllegalArgumentException("invalid definition: expected one of ['class', 'static'] [" + line + "]"); } // Handle the case for a constructor definition. @@ -221,7 +311,7 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep // Ensure the final token of the line is ')'. if (line.endsWith(")") == false) { throw new IllegalArgumentException( - "invalid constructor definition: expected a closing parenthesis [" + line + "]"); + "invalid constructor definition: expected a closing parenthesis [" + line + "]"); } // Parse the constructor parameters. @@ -234,34 +324,34 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep whitelistConstructors.add(new WhitelistConstructor(origin, Arrays.asList(tokens))); - // Handle the case for a method or augmented method definition. - // Expects the following format: ID ID? ID '(' ( ID ( ',' ID )* )? ')' '\n' + // Handle the case for a method or augmented method definition. + // Expects the following format: ID ID? ID '(' ( ID ( ',' ID )* )? ')' '\n' } else if (line.contains("(")) { // Ensure the final token of the line is ')'. if (line.endsWith(")") == false) { throw new IllegalArgumentException( - "invalid method definition: expected a closing parenthesis [" + line + "]"); + "invalid method definition: expected a closing parenthesis [" + line + "]"); } // Parse the tokens prior to the method parameters. int parameterIndex = line.indexOf('('); - String[] tokens = line.trim().substring(0, parameterIndex).split("\\s+"); + String[] tokens = line.substring(0, parameterIndex).trim().split("\\s+"); - String javaMethodName; + String methodName; String javaAugmentedClassName; // Based on the number of tokens, look up the Java method name and if provided the Java augmented class. if (tokens.length == 2) { - javaMethodName = tokens[1]; + methodName = tokens[1]; javaAugmentedClassName = null; } else if (tokens.length == 3) { - javaMethodName = tokens[2]; + methodName = tokens[2]; javaAugmentedClassName = tokens[1]; } else { throw new IllegalArgumentException("invalid method definition: unexpected format [" + line + "]"); } - String painlessReturnTypeName = tokens[0]; + String returnCanonicalTypeName = tokens[0]; // Parse the method parameters. tokens = line.substring(parameterIndex + 1, line.length() - 1).replaceAll("\\s+", "").split(","); @@ -271,11 +361,11 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep tokens = new String[0]; } - whitelistMethods.add(new WhitelistMethod(origin, javaAugmentedClassName, javaMethodName, - painlessReturnTypeName, Arrays.asList(tokens))); + whitelistMethods.add(new WhitelistMethod(origin, javaAugmentedClassName, methodName, + returnCanonicalTypeName, Arrays.asList(tokens))); - // Handle the case for a field definition. - // Expects the following format: ID ID '\n' + // Handle the case for a field definition. + // Expects the following format: ID ID '\n' } else { // Parse the field tokens. String[] tokens = line.split("\\s+"); @@ -287,20 +377,23 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep whitelistFields.add(new WhitelistField(origin, tokens[1], tokens[0])); } + } else { + throw new IllegalArgumentException("invalid definition: unable to parse line [" + line + "]"); } } // Ensure all classes end with a '}' token before the end of the file. 
if (javaClassName != null) { - throw new IllegalArgumentException("invalid class definition: expected closing bracket"); + throw new IllegalArgumentException("invalid definition: expected closing bracket"); } } catch (Exception exception) { throw new RuntimeException("error in [" + filepath + "] at line [" + number + "]", exception); } } + ClassLoader loader = AccessController.doPrivileged((PrivilegedAction<ClassLoader>)resource::getClassLoader); - return new Whitelist(loader, whitelistClasses); + return new Whitelist(loader, whitelistClasses, whitelistBindings); } private WhitelistLoader() {} diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java index 5cd023a3591ad..f450ee0238d19 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java @@ -67,7 +67,8 @@ public class WhitelistMethod { * is augmented as described in the class documentation. */ public WhitelistMethod(String origin, String augmentedCanonicalClassName, String methodName, - String returnCanonicalTypeName, List<String> canonicalTypeNameParameters) { + String returnCanonicalTypeName, List<String> canonicalTypeNameParameters) { + this.origin = Objects.requireNonNull(origin); this.augmentedCanonicalClassName = augmentedCanonicalClassName; this.methodName = methodName; diff --git a/modules/lang-painless/src/main/antlr/PainlessParser.g4 b/modules/lang-painless/src/main/antlr/PainlessParser.g4 index 5292b4d195056..27db9222f32ef 100644 --- a/modules/lang-painless/src/main/antlr/PainlessParser.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessParser.g4 @@ -22,7 +22,7 @@ parser grammar PainlessParser; options { tokenVocab=PainlessLexer; } source - : function* statement* dstatement? EOF + : function* statement* EOF ; function @@ -35,7 +35,7 @@ parameters statement : rstatement - | dstatement SEMICOLON + | dstatement ( SEMICOLON | EOF ) ; // Note we use a predicate on the if/else case here to prevent the diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java new file mode 100644 index 0000000000000..fc2a10891f623 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.painless; + +public class BindingTest { + public int state; + + public BindingTest(int state0, int state1) { + this.state = state0 + state1; + } + + public int testAddWithState(int istateless, double dstateless) { + return istateless + state + (int)dstateless; + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java index 83eb74d827f88..d18cf2780cf3c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java @@ -31,6 +31,7 @@ public class Globals { private final Map syntheticMethods = new HashMap<>(); private final Map constantInitializers = new HashMap<>(); + private final Map<String, Class<?>> bindings = new HashMap<>(); private final BitSet statements; /** Create a new Globals from the set of statement boundaries */ @@ -54,7 +55,15 @@ public void addConstantInitializer(Constant constant) { throw new IllegalStateException("constant initializer: " + constant.name + " already exists"); } } - + + /** Adds a new binding to be written as a local variable */ + public String addBinding(Class<?> type) { + String name = "$binding$" + bindings.size(); + bindings.put(name, type); + + return name; + } + /** Returns the current synthetic methods */ public Map getSyntheticMethods() { return syntheticMethods; } @@ -64,7 +73,12 @@ public Map getSyntheticMethods() { public Map getConstantInitializers() { return constantInitializers; } - + + /** Returns the current bindings */ + public Map<String, Class<?>> getBindings() { + return bindings; + } + /** Returns the set of statement boundaries */ public BitSet getStatements() { return statements; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java index 345db46f8875f..7de8353194dda 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java @@ -21,6 +21,7 @@ import org.elasticsearch.painless.lookup.PainlessLookup; import org.elasticsearch.painless.lookup.PainlessLookupUtility; +import org.elasticsearch.painless.lookup.def; import java.lang.invoke.MethodType; import java.lang.reflect.Field; @@ -190,7 +191,7 @@ private static Class definitionTypeForClass(PainlessLookup painlessLookup, Cl componentType = componentType.getComponentType(); } - if (painlessLookup.lookupPainlessClass(componentType) == null) { + if (componentType != def.class && painlessLookup.lookupPainlessClass(componentType) == null) { throw new IllegalArgumentException(unknownErrorMessageSource.apply(componentType)); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java index 7fa10f6e9fbf2..feebacc60687b 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java @@ -1,17 +1,13 @@ // ANTLR GENERATED CODE: DO NOT EDIT package org.elasticsearch.painless.antlr; - -import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.RuntimeMetaData; -import
org.antlr.v4.runtime.Vocabulary; -import org.antlr.v4.runtime.VocabularyImpl; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.atn.*; import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.*; @SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) abstract class PainlessLexer extends Lexer { @@ -21,16 +17,16 @@ abstract class PainlessLexer extends Lexer { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, - NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, - FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, - THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32, - ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, - EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, - COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, - DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, - AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, - DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, + WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, + NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, + FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, + THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32, + ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, + EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, + COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, + DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, + AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, + DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, ID=82, DOTINTEGER=83, DOTID=84; public static final int AFTER_DOT = 1; public static String[] modeNames = { @@ -38,39 +34,39 @@ abstract class PainlessLexer extends Lexer { }; public static final String[] ruleNames = { - "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", "DOT", - "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", "FOR", - "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "THIS", - "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", "SUB", "LSH", - "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", - "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", "REF", "ARROW", - "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", "ADIV", - "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", "HEX", - "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", + "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", "DOT", + "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", "FOR", + "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "THIS", + "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", 
"ADD", "SUB", "LSH", + "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", + "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", "REF", "ARROW", + "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", "ADIV", + "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", "HEX", + "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID" }; private static final String[] _LITERAL_NAMES = { - null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'", - "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", - "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", - "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", - "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", - "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'", - "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", - "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, + null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'", + "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", + "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", + "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", + "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", + "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'", + "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", + "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null, null, null, null, null, "'true'", "'false'", "'null'" }; private static final String[] _SYMBOLIC_NAMES = { - null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", - "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", - "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", - "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", - "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", - "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", - "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", - "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", - "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", + null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", + "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", + "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", + "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", + "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", + "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", + "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", + "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", + "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID" }; public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java index bef57d22e9ea9..5a823ecfda308 
100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java @@ -1,25 +1,13 @@ // ANTLR GENERATED CODE: DO NOT EDIT package org.elasticsearch.painless.antlr; - -import org.antlr.v4.runtime.FailedPredicateException; -import org.antlr.v4.runtime.NoViableAltException; -import org.antlr.v4.runtime.Parser; -import org.antlr.v4.runtime.ParserRuleContext; -import org.antlr.v4.runtime.RecognitionException; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.RuntimeMetaData; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.Vocabulary; -import org.antlr.v4.runtime.VocabularyImpl; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ParserATNSimulator; -import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.atn.*; import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.tree.ParseTreeVisitor; -import org.antlr.v4.runtime.tree.TerminalNode; - +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.misc.*; +import org.antlr.v4.runtime.tree.*; import java.util.List; +import java.util.Iterator; +import java.util.ArrayList; @SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) class PainlessParser extends Parser { @@ -29,57 +17,57 @@ class PainlessParser extends Parser { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, - NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, - FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, - THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32, - ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, - EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, - COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, - DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, - AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, - DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, + WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, + NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, + FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, + THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32, + ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, + EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, + COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, + DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, + AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, + DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, ID=82, DOTINTEGER=83, DOTID=84; public static final int - RULE_source = 0, RULE_function = 1, RULE_parameters = 2, RULE_statement = 3, - RULE_rstatement = 4, RULE_dstatement = 5, RULE_trailer = 6, RULE_block = 7, - RULE_empty = 8, RULE_initializer = 9, RULE_afterthought = 10, RULE_declaration = 11, - RULE_decltype = 12, RULE_declvar = 13, RULE_trap = 14, RULE_expression = 15, - RULE_unary = 16, RULE_chain = 17, RULE_primary = 
18, RULE_postfix = 19, - RULE_postdot = 20, RULE_callinvoke = 21, RULE_fieldaccess = 22, RULE_braceaccess = 23, - RULE_arrayinitializer = 24, RULE_listinitializer = 25, RULE_mapinitializer = 26, - RULE_maptoken = 27, RULE_arguments = 28, RULE_argument = 29, RULE_lambda = 30, + RULE_source = 0, RULE_function = 1, RULE_parameters = 2, RULE_statement = 3, + RULE_rstatement = 4, RULE_dstatement = 5, RULE_trailer = 6, RULE_block = 7, + RULE_empty = 8, RULE_initializer = 9, RULE_afterthought = 10, RULE_declaration = 11, + RULE_decltype = 12, RULE_declvar = 13, RULE_trap = 14, RULE_expression = 15, + RULE_unary = 16, RULE_chain = 17, RULE_primary = 18, RULE_postfix = 19, + RULE_postdot = 20, RULE_callinvoke = 21, RULE_fieldaccess = 22, RULE_braceaccess = 23, + RULE_arrayinitializer = 24, RULE_listinitializer = 25, RULE_mapinitializer = 26, + RULE_maptoken = 27, RULE_arguments = 28, RULE_argument = 29, RULE_lambda = 30, RULE_lamtype = 31, RULE_funcref = 32; public static final String[] ruleNames = { - "source", "function", "parameters", "statement", "rstatement", "dstatement", - "trailer", "block", "empty", "initializer", "afterthought", "declaration", - "decltype", "declvar", "trap", "expression", "unary", "chain", "primary", - "postfix", "postdot", "callinvoke", "fieldaccess", "braceaccess", "arrayinitializer", - "listinitializer", "mapinitializer", "maptoken", "arguments", "argument", + "source", "function", "parameters", "statement", "rstatement", "dstatement", + "trailer", "block", "empty", "initializer", "afterthought", "declaration", + "decltype", "declvar", "trap", "expression", "unary", "chain", "primary", + "postfix", "postdot", "callinvoke", "fieldaccess", "braceaccess", "arrayinitializer", + "listinitializer", "mapinitializer", "maptoken", "arguments", "argument", "lambda", "lamtype", "funcref" }; private static final String[] _LITERAL_NAMES = { - null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'", - "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", - "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", - "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", - "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", - "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'", - "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", - "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, + null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'", + "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", + "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", + "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", + "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", + "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'", + "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", + "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null, null, null, null, null, "'true'", "'false'", "'null'" }; private static final String[] _SYMBOLIC_NAMES = { - null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", - "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", - "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", - "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", - "SUB", "LSH", "RSH", "USH", "LT", 
"LTE", "GT", "GTE", "EQ", "EQR", "NE", - "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", - "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", - "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", - "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", + null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", + "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", + "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", + "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", + "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", + "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", + "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", + "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", + "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID" }; public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); @@ -145,9 +133,6 @@ public List statement() { public StatementContext statement(int i) { return getRuleContext(StatementContext.class,i); } - public DstatementContext dstatement() { - return getRuleContext(DstatementContext.class,0); - } public SourceContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -177,7 +162,7 @@ public final SourceContext source() throws RecognitionException { setState(66); function(); } - } + } } setState(71); _errHandler.sync(this); @@ -185,30 +170,19 @@ public final SourceContext source() throws RecognitionException { } setState(75); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,1,_ctx); - while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { - if ( _alt==1 ) { - { - { - setState(72); - statement(); - } - } - } - setState(77); - _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,1,_ctx); - } - setState(79); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DO) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { + while ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << IF) | (1L << WHILE) | (1L << DO) | (1L << FOR) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << TRY) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(78); - dstatement(); + { + setState(72); + statement(); + } } + setState(77); + _errHandler.sync(this); + _la = _input.LA(1); } - - setState(81); + setState(78); 
match(EOF); } } @@ -251,13 +225,13 @@ public final FunctionContext function() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(83); + setState(80); decltype(); - setState(84); + setState(81); match(ID); - setState(85); + setState(82); parameters(); - setState(86); + setState(83); block(); } } @@ -307,38 +281,38 @@ public final ParametersContext parameters() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(88); + setState(85); match(LP); - setState(100); + setState(97); _la = _input.LA(1); if (_la==TYPE) { { - setState(89); + setState(86); decltype(); - setState(90); + setState(87); match(ID); - setState(97); + setState(94); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(91); + setState(88); match(COMMA); - setState(92); + setState(89); decltype(); - setState(93); + setState(90); match(ID); } } - setState(99); + setState(96); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(102); + setState(99); match(RP); } } @@ -361,6 +335,7 @@ public DstatementContext dstatement() { return getRuleContext(DstatementContext.class,0); } public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); } + public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); } public StatementContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -375,8 +350,9 @@ public T accept(ParseTreeVisitor visitor) { public final StatementContext statement() throws RecognitionException { StatementContext _localctx = new StatementContext(_ctx, getState()); enterRule(_localctx, 6, RULE_statement); + int _la; try { - setState(108); + setState(105); switch (_input.LA(1)) { case IF: case WHILE: @@ -384,7 +360,7 @@ public final StatementContext statement() throws RecognitionException { case TRY: enterOuterAlt(_localctx, 1); { - setState(104); + setState(101); rstatement(); } break; @@ -415,10 +391,15 @@ public final StatementContext statement() throws RecognitionException { case ID: enterOuterAlt(_localctx, 2); { - setState(105); + setState(102); dstatement(); - setState(106); - match(SEMICOLON); + setState(103); + _la = _input.LA(1); + if ( !(_la==EOF || _la==SEMICOLON) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } } break; default: @@ -441,7 +422,7 @@ public RstatementContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_rstatement; } - + public RstatementContext() { } public void copyFrom(RstatementContext ctx) { super.copyFrom(ctx); @@ -584,37 +565,37 @@ public final RstatementContext rstatement() throws RecognitionException { int _la; try { int _alt; - setState(170); + setState(167); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { case 1: _localctx = new IfContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(110); + setState(107); match(IF); - setState(111); + setState(108); match(LP); - setState(112); + setState(109); expression(0); - setState(113); + setState(110); match(RP); - setState(114); + setState(111); trailer(); - setState(118); + setState(115); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,6,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { case 1: { - setState(115); + setState(112); match(ELSE); - setState(116); + setState(113); trailer(); } break; case 2: { - setState(117); + setState(114); if 
(!( _input.LA(1) != ELSE )) throw new FailedPredicateException(this, " _input.LA(1) != ELSE "); } break; @@ -625,15 +606,15 @@ public final RstatementContext rstatement() throws RecognitionException { _localctx = new WhileContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(120); + setState(117); match(WHILE); - setState(121); + setState(118); match(LP); - setState(122); + setState(119); expression(0); - setState(123); + setState(120); match(RP); - setState(126); + setState(123); switch (_input.LA(1)) { case LBRACK: case LBRACE: @@ -666,13 +647,13 @@ public final RstatementContext rstatement() throws RecognitionException { case TYPE: case ID: { - setState(124); + setState(121); trailer(); } break; case SEMICOLON: { - setState(125); + setState(122); empty(); } break; @@ -685,44 +666,44 @@ public final RstatementContext rstatement() throws RecognitionException { _localctx = new ForContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(128); + setState(125); match(FOR); - setState(129); + setState(126); match(LP); - setState(131); + setState(128); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(130); + setState(127); initializer(); } } - setState(133); + setState(130); match(SEMICOLON); - setState(135); + setState(132); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(134); + setState(131); expression(0); } } - setState(137); + setState(134); match(SEMICOLON); - setState(139); + setState(136); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(138); + setState(135); afterthought(); } } - setState(141); + setState(138); match(RP); - setState(144); + setState(141); switch (_input.LA(1)) { case LBRACK: case LBRACE: @@ -755,13 +736,13 @@ public final RstatementContext rstatement() throws RecognitionException { case TYPE: case ID: { - setState(142); + setState(139); trailer(); } break; case SEMICOLON: { - setState(143); + setState(140); empty(); } break; @@ -774,21 +755,21 @@ public final RstatementContext rstatement() throws RecognitionException { _localctx = new EachContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(146); + 
setState(143); match(FOR); - setState(147); + setState(144); match(LP); - setState(148); + setState(145); decltype(); - setState(149); + setState(146); match(ID); - setState(150); + setState(147); match(COLON); - setState(151); + setState(148); expression(0); - setState(152); + setState(149); match(RP); - setState(153); + setState(150); trailer(); } break; @@ -796,19 +777,19 @@ public final RstatementContext rstatement() throws RecognitionException { _localctx = new IneachContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(155); + setState(152); match(FOR); - setState(156); + setState(153); match(LP); - setState(157); + setState(154); match(ID); - setState(158); + setState(155); match(IN); - setState(159); + setState(156); expression(0); - setState(160); + setState(157); match(RP); - setState(161); + setState(158); trailer(); } break; @@ -816,11 +797,11 @@ public final RstatementContext rstatement() throws RecognitionException { _localctx = new TryContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(163); + setState(160); match(TRY); - setState(164); + setState(161); block(); - setState(166); + setState(163); _errHandler.sync(this); _alt = 1; do { @@ -828,7 +809,7 @@ public final RstatementContext rstatement() throws RecognitionException { case 1: { { - setState(165); + setState(162); trap(); } } @@ -836,9 +817,9 @@ public final RstatementContext rstatement() throws RecognitionException { default: throw new NoViableAltException(this); } - setState(168); + setState(165); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,12,_ctx); + _alt = getInterpreter().adaptivePredict(_input,11,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; @@ -860,7 +841,7 @@ public DstatementContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_dstatement; } - + public DstatementContext() { } public void copyFrom(DstatementContext ctx) { super.copyFrom(ctx); @@ -953,24 +934,24 @@ public final DstatementContext dstatement() throws RecognitionException { DstatementContext _localctx = new DstatementContext(_ctx, getState()); enterRule(_localctx, 10, RULE_dstatement); try { - setState(187); + setState(184); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { case 1: _localctx = new DoContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(172); + setState(169); match(DO); - setState(173); + setState(170); block(); - setState(174); + setState(171); match(WHILE); - setState(175); + setState(172); match(LP); - setState(176); + setState(173); expression(0); - setState(177); + setState(174); match(RP); } break; @@ -978,7 +959,7 @@ public final DstatementContext dstatement() throws RecognitionException { _localctx = new DeclContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(179); + setState(176); declaration(); } break; @@ -986,7 +967,7 @@ public final DstatementContext dstatement() throws RecognitionException { _localctx = new ContinueContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(180); + setState(177); match(CONTINUE); } break; @@ -994,7 +975,7 @@ public final DstatementContext dstatement() throws RecognitionException { _localctx = new BreakContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(181); + setState(178); match(BREAK); } break; @@ -1002,9 +983,9 @@ public final DstatementContext dstatement() throws 
RecognitionException { _localctx = new ReturnContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(182); + setState(179); match(RETURN); - setState(183); + setState(180); expression(0); } break; @@ -1012,9 +993,9 @@ public final DstatementContext dstatement() throws RecognitionException { _localctx = new ThrowContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(184); + setState(181); match(THROW); - setState(185); + setState(182); expression(0); } break; @@ -1022,7 +1003,7 @@ public final DstatementContext dstatement() throws RecognitionException { _localctx = new ExprContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(186); + setState(183); expression(0); } break; @@ -1061,12 +1042,12 @@ public final TrailerContext trailer() throws RecognitionException { TrailerContext _localctx = new TrailerContext(_ctx, getState()); enterRule(_localctx, 12, RULE_trailer); try { - setState(191); + setState(188); switch (_input.LA(1)) { case LBRACK: enterOuterAlt(_localctx, 1); { - setState(189); + setState(186); block(); } break; @@ -1101,7 +1082,7 @@ public final TrailerContext trailer() throws RecognitionException { case ID: enterOuterAlt(_localctx, 2); { - setState(190); + setState(187); statement(); } break; @@ -1151,34 +1132,34 @@ public final BlockContext block() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(193); + setState(190); match(LBRACK); - setState(197); + setState(194); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,16,_ctx); + _alt = getInterpreter().adaptivePredict(_input,15,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(194); + setState(191); statement(); } - } + } } - setState(199); + setState(196); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,16,_ctx); + _alt = getInterpreter().adaptivePredict(_input,15,_ctx); } - setState(201); + setState(198); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DO) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(200); + setState(197); dstatement(); } } - setState(203); + setState(200); match(RBRACK); } } @@ -1212,7 +1193,7 @@ public final EmptyContext empty() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(205); + setState(202); match(SEMICOLON); } } @@ -1249,20 +1230,20 @@ public final InitializerContext initializer() throws RecognitionException { InitializerContext _localctx = new InitializerContext(_ctx, getState()); enterRule(_localctx, 18, RULE_initializer); try { - setState(209); + setState(206); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,18,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,17,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(207); + setState(204); declaration(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(208); + setState(205); expression(0); } break; @@ -1300,7 +1281,7 @@ public final AfterthoughtContext afterthought() throws 
RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(211); + setState(208); expression(0); } } @@ -1347,23 +1328,23 @@ public final DeclarationContext declaration() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(213); + setState(210); decltype(); - setState(214); + setState(211); declvar(); - setState(219); + setState(216); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(215); + setState(212); match(COMMA); - setState(216); + setState(213); declvar(); } } - setState(221); + setState(218); _errHandler.sync(this); _la = _input.LA(1); } @@ -1408,25 +1389,25 @@ public final DecltypeContext decltype() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(222); + setState(219); match(TYPE); - setState(227); + setState(224); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,20,_ctx); + _alt = getInterpreter().adaptivePredict(_input,19,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(223); + setState(220); match(LBRACE); - setState(224); + setState(221); match(RBRACE); } - } + } } - setState(229); + setState(226); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,20,_ctx); + _alt = getInterpreter().adaptivePredict(_input,19,_ctx); } } } @@ -1465,15 +1446,15 @@ public final DeclvarContext declvar() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(230); + setState(227); match(ID); - setState(233); + setState(230); _la = _input.LA(1); if (_la==ASSIGN) { { - setState(231); + setState(228); match(ASSIGN); - setState(232); + setState(229); expression(0); } } @@ -1517,17 +1498,17 @@ public final TrapContext trap() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(235); + setState(232); match(CATCH); - setState(236); + setState(233); match(LP); - setState(237); + setState(234); match(TYPE); - setState(238); + setState(235); match(ID); - setState(239); + setState(236); match(RP); - setState(240); + setState(237); block(); } } @@ -1547,7 +1528,7 @@ public ExpressionContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_expression; } - + public ExpressionContext() { } public void copyFrom(ExpressionContext ctx) { super.copyFrom(ctx); @@ -1723,35 +1704,35 @@ private ExpressionContext expression(int _p) throws RecognitionException { _ctx = _localctx; _prevctx = _localctx; - setState(243); + setState(240); unary(); } _ctx.stop = _input.LT(-1); - setState(295); + setState(292); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,23,_ctx); + _alt = getInterpreter().adaptivePredict(_input,22,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(293); + setState(290); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,22,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,21,_ctx) ) { case 1: { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(245); + setState(242); if (!(precpred(_ctx, 15))) throw new FailedPredicateException(this, "precpred(_ctx, 15)"); - setState(246); + setState(243); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << MUL) | 
(1L << DIV) | (1L << REM))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(247); + setState(244); expression(16); } break; @@ -1759,16 +1740,16 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(248); + setState(245); if (!(precpred(_ctx, 14))) throw new FailedPredicateException(this, "precpred(_ctx, 14)"); - setState(249); + setState(246); _la = _input.LA(1); if ( !(_la==ADD || _la==SUB) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(250); + setState(247); expression(15); } break; @@ -1776,16 +1757,16 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(251); + setState(248); if (!(precpred(_ctx, 13))) throw new FailedPredicateException(this, "precpred(_ctx, 13)"); - setState(252); + setState(249); _la = _input.LA(1); if ( !(_la==FIND || _la==MATCH) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(253); + setState(250); expression(14); } break; @@ -1793,16 +1774,16 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(254); + setState(251); if (!(precpred(_ctx, 12))) throw new FailedPredicateException(this, "precpred(_ctx, 12)"); - setState(255); + setState(252); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LSH) | (1L << RSH) | (1L << USH))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(256); + setState(253); expression(13); } break; @@ -1810,16 +1791,16 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(257); + setState(254); if (!(precpred(_ctx, 11))) throw new FailedPredicateException(this, "precpred(_ctx, 11)"); - setState(258); + setState(255); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LT) | (1L << LTE) | (1L << GT) | (1L << GTE))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(259); + setState(256); expression(12); } break; @@ -1827,16 +1808,16 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(260); + setState(257); if (!(precpred(_ctx, 9))) throw new FailedPredicateException(this, "precpred(_ctx, 9)"); - setState(261); + setState(258); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << EQ) | (1L << EQR) | (1L << NE) | (1L << NER))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(262); + setState(259); expression(10); } break; @@ -1844,11 +1825,11 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(263); + setState(260); if (!(precpred(_ctx, 8))) 
throw new FailedPredicateException(this, "precpred(_ctx, 8)"); - setState(264); + setState(261); match(BWAND); - setState(265); + setState(262); expression(9); } break; @@ -1856,11 +1837,11 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(266); + setState(263); if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, "precpred(_ctx, 7)"); - setState(267); + setState(264); match(XOR); - setState(268); + setState(265); expression(8); } break; @@ -1868,11 +1849,11 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(269); + setState(266); if (!(precpred(_ctx, 6))) throw new FailedPredicateException(this, "precpred(_ctx, 6)"); - setState(270); + setState(267); match(BWOR); - setState(271); + setState(268); expression(7); } break; @@ -1880,11 +1861,11 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(272); + setState(269); if (!(precpred(_ctx, 5))) throw new FailedPredicateException(this, "precpred(_ctx, 5)"); - setState(273); + setState(270); match(BOOLAND); - setState(274); + setState(271); expression(6); } break; @@ -1892,11 +1873,11 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(275); + setState(272); if (!(precpred(_ctx, 4))) throw new FailedPredicateException(this, "precpred(_ctx, 4)"); - setState(276); + setState(273); match(BOOLOR); - setState(277); + setState(274); expression(5); } break; @@ -1904,15 +1885,15 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new ConditionalContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(278); + setState(275); if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(279); + setState(276); match(COND); - setState(280); + setState(277); expression(0); - setState(281); + setState(278); match(COLON); - setState(282); + setState(279); expression(3); } break; @@ -1920,11 +1901,11 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new ElvisContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(284); + setState(281); if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); - setState(285); + setState(282); match(ELVIS); - setState(286); + setState(283); expression(2); } break; @@ -1932,16 +1913,16 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new AssignmentContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(287); + setState(284); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(288); + setState(285); _la = _input.LA(1); if ( 
!(((((_la - 60)) & ~0x3f) == 0 && ((1L << (_la - 60)) & ((1L << (ASSIGN - 60)) | (1L << (AADD - 60)) | (1L << (ASUB - 60)) | (1L << (AMUL - 60)) | (1L << (ADIV - 60)) | (1L << (AREM - 60)) | (1L << (AAND - 60)) | (1L << (AXOR - 60)) | (1L << (AOR - 60)) | (1L << (ALSH - 60)) | (1L << (ARSH - 60)) | (1L << (AUSH - 60)))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(289); + setState(286); expression(1); } break; @@ -1949,20 +1930,20 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new InstanceofContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(290); + setState(287); if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)"); - setState(291); + setState(288); match(INSTANCEOF); - setState(292); + setState(289); decltype(); } break; } - } + } } - setState(297); + setState(294); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,23,_ctx); + _alt = getInterpreter().adaptivePredict(_input,22,_ctx); } } } @@ -1982,7 +1963,7 @@ public UnaryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_unary; } - + public UnaryContext() { } public void copyFrom(UnaryContext ctx) { super.copyFrom(ctx); @@ -2062,21 +2043,21 @@ public final UnaryContext unary() throws RecognitionException { enterRule(_localctx, 32, RULE_unary); int _la; try { - setState(311); + setState(308); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,23,_ctx) ) { case 1: _localctx = new PreContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(298); + setState(295); _la = _input.LA(1); if ( !(_la==INCR || _la==DECR) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(299); + setState(296); chain(); } break; @@ -2084,9 +2065,9 @@ public final UnaryContext unary() throws RecognitionException { _localctx = new PostContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(300); + setState(297); chain(); - setState(301); + setState(298); _la = _input.LA(1); if ( !(_la==INCR || _la==DECR) ) { _errHandler.recoverInline(this); @@ -2099,7 +2080,7 @@ public final UnaryContext unary() throws RecognitionException { _localctx = new ReadContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(303); + setState(300); chain(); } break; @@ -2107,14 +2088,14 @@ public final UnaryContext unary() throws RecognitionException { _localctx = new OperatorContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(304); + setState(301); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(305); + setState(302); unary(); } break; @@ -2122,13 +2103,13 @@ public final UnaryContext unary() throws RecognitionException { _localctx = new CastContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(306); + setState(303); match(LP); - setState(307); + setState(304); decltype(); - setState(308); + setState(305); match(RP); - setState(309); + setState(306); unary(); } break; @@ -2150,7 +2131,7 @@ public ChainContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_chain; } - + public ChainContext() { } public void 
copyFrom(ChainContext ctx) { super.copyFrom(ctx); @@ -2210,30 +2191,30 @@ public final ChainContext chain() throws RecognitionException { enterRule(_localctx, 34, RULE_chain); try { int _alt; - setState(329); + setState(326); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,26,_ctx) ) { case 1: _localctx = new DynamicContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(313); + setState(310); primary(); - setState(317); + setState(314); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,25,_ctx); + _alt = getInterpreter().adaptivePredict(_input,24,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(314); + setState(311); postfix(); } - } + } } - setState(319); + setState(316); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,25,_ctx); + _alt = getInterpreter().adaptivePredict(_input,24,_ctx); } } break; @@ -2241,25 +2222,25 @@ public final ChainContext chain() throws RecognitionException { _localctx = new StaticContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(320); + setState(317); decltype(); - setState(321); + setState(318); postdot(); - setState(325); + setState(322); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,26,_ctx); + _alt = getInterpreter().adaptivePredict(_input,25,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(322); + setState(319); postfix(); } - } + } } - setState(327); + setState(324); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,26,_ctx); + _alt = getInterpreter().adaptivePredict(_input,25,_ctx); } } break; @@ -2267,7 +2248,7 @@ public final ChainContext chain() throws RecognitionException { _localctx = new NewarrayContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(328); + setState(325); arrayinitializer(); } break; @@ -2289,7 +2270,7 @@ public PrimaryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_primary; } - + public PrimaryContext() { } public void copyFrom(PrimaryContext ctx) { super.copyFrom(ctx); @@ -2427,18 +2408,18 @@ public final PrimaryContext primary() throws RecognitionException { enterRule(_localctx, 36, RULE_primary); int _la; try { - setState(349); + setState(346); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,28,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) { case 1: _localctx = new PrecedenceContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(331); + setState(328); match(LP); - setState(332); + setState(329); expression(0); - setState(333); + setState(330); match(RP); } break; @@ -2446,7 +2427,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new NumericContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(335); + setState(332); _la = _input.LA(1); if ( !(((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)))) != 0)) ) { _errHandler.recoverInline(this); @@ -2459,7 +2440,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new TrueContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(336); + setState(333); match(TRUE); } break; @@ -2467,7 +2448,7 @@ public final PrimaryContext 
primary() throws RecognitionException { _localctx = new FalseContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(337); + setState(334); match(FALSE); } break; @@ -2475,7 +2456,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new NullContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(338); + setState(335); match(NULL); } break; @@ -2483,7 +2464,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new StringContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(339); + setState(336); match(STRING); } break; @@ -2491,7 +2472,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new RegexContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(340); + setState(337); match(REGEX); } break; @@ -2499,7 +2480,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new ListinitContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(341); + setState(338); listinitializer(); } break; @@ -2507,7 +2488,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new MapinitContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(342); + setState(339); mapinitializer(); } break; @@ -2515,7 +2496,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new VariableContext(_localctx); enterOuterAlt(_localctx, 10); { - setState(343); + setState(340); match(ID); } break; @@ -2523,9 +2504,9 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new CalllocalContext(_localctx); enterOuterAlt(_localctx, 11); { - setState(344); + setState(341); match(ID); - setState(345); + setState(342); arguments(); } break; @@ -2533,11 +2514,11 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new NewobjectContext(_localctx); enterOuterAlt(_localctx, 12); { - setState(346); + setState(343); match(NEW); - setState(347); + setState(344); match(TYPE); - setState(348); + setState(345); arguments(); } break; @@ -2579,27 +2560,27 @@ public final PostfixContext postfix() throws RecognitionException { PostfixContext _localctx = new PostfixContext(_ctx, getState()); enterRule(_localctx, 38, RULE_postfix); try { - setState(354); + setState(351); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,28,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(351); + setState(348); callinvoke(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(352); + setState(349); fieldaccess(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(353); + setState(350); braceaccess(); } break; @@ -2638,20 +2619,20 @@ public final PostdotContext postdot() throws RecognitionException { PostdotContext _localctx = new PostdotContext(_ctx, getState()); enterRule(_localctx, 40, RULE_postdot); try { - setState(358); + setState(355); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(356); + setState(353); callinvoke(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(357); + setState(354); fieldaccess(); } break; @@ -2693,16 +2674,16 @@ public final CallinvokeContext callinvoke() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(360); + setState(357); _la = _input.LA(1); if ( !(_la==DOT || 
_la==NSDOT) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(361); + setState(358); match(DOTID); - setState(362); + setState(359); arguments(); } } @@ -2740,14 +2721,14 @@ public final FieldaccessContext fieldaccess() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(364); + setState(361); _la = _input.LA(1); if ( !(_la==DOT || _la==NSDOT) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(365); + setState(362); _la = _input.LA(1); if ( !(_la==DOTINTEGER || _la==DOTID) ) { _errHandler.recoverInline(this); @@ -2790,11 +2771,11 @@ public final BraceaccessContext braceaccess() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(367); + setState(364); match(LBRACE); - setState(368); + setState(365); expression(0); - setState(369); + setState(366); match(RBRACE); } } @@ -2814,7 +2795,7 @@ public ArrayinitializerContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_arrayinitializer; } - + public ArrayinitializerContext() { } public void copyFrom(ArrayinitializerContext ctx) { super.copyFrom(ctx); @@ -2890,18 +2871,18 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept int _la; try { int _alt; - setState(412); + setState(409); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,37,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,36,_ctx) ) { case 1: _localctx = new NewstandardarrayContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(371); + setState(368); match(NEW); - setState(372); + setState(369); match(TYPE); - setState(377); + setState(374); _errHandler.sync(this); _alt = 1; do { @@ -2909,11 +2890,11 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept case 1: { { - setState(373); + setState(370); match(LBRACE); - setState(374); + setState(371); expression(0); - setState(375); + setState(372); match(RBRACE); } } @@ -2921,32 +2902,32 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept default: throw new NoViableAltException(this); } - setState(379); + setState(376); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,31,_ctx); + _alt = getInterpreter().adaptivePredict(_input,30,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); - setState(388); + setState(385); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,33,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,32,_ctx) ) { case 1: { - setState(381); + setState(378); postdot(); - setState(385); + setState(382); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,32,_ctx); + _alt = getInterpreter().adaptivePredict(_input,31,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(382); + setState(379); postfix(); } - } + } } - setState(387); + setState(384); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,32,_ctx); + _alt = getInterpreter().adaptivePredict(_input,31,_ctx); } } break; @@ -2957,58 +2938,58 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept _localctx = new NewinitializedarrayContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(390); + setState(387); match(NEW); - setState(391); + setState(388); match(TYPE); - setState(392); + setState(389); match(LBRACE); - setState(393); + 
setState(390); match(RBRACE); - setState(394); + setState(391); match(LBRACK); - setState(403); + setState(400); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(395); + setState(392); expression(0); - setState(400); + setState(397); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(396); + setState(393); match(COMMA); - setState(397); + setState(394); expression(0); } } - setState(402); + setState(399); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(405); + setState(402); match(RBRACK); - setState(409); + setState(406); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,36,_ctx); + _alt = getInterpreter().adaptivePredict(_input,35,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(406); + setState(403); postfix(); } - } + } } - setState(411); + setState(408); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,36,_ctx); + _alt = getInterpreter().adaptivePredict(_input,35,_ctx); } } break; @@ -3054,42 +3035,42 @@ public final ListinitializerContext listinitializer() throws RecognitionExceptio enterRule(_localctx, 50, RULE_listinitializer); int _la; try { - setState(427); + setState(424); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,39,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,38,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(414); + setState(411); match(LBRACE); - setState(415); + setState(412); expression(0); - setState(420); + setState(417); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(416); + setState(413); match(COMMA); - setState(417); + setState(414); expression(0); } } - setState(422); + setState(419); _errHandler.sync(this); _la = _input.LA(1); } - setState(423); + setState(420); match(RBRACE); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(425); + setState(422); match(LBRACE); - setState(426); + setState(423); match(RBRACE); } break; @@ -3136,44 +3117,44 @@ public final MapinitializerContext mapinitializer() throws RecognitionException enterRule(_localctx, 52, RULE_mapinitializer); int _la; try { - setState(443); + setState(440); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,41,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,40,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(429); + setState(426); match(LBRACE); - setState(430); + setState(427); maptoken(); - setState(435); + setState(432); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(431); + setState(428); match(COMMA); - setState(432); + setState(429); maptoken(); } } - setState(437); + setState(434); _errHandler.sync(this); _la = _input.LA(1); } - setState(438); + setState(435); match(RBRACE); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(440); + setState(437); match(LBRACE); - setState(441); + setState(438); match(COLON); - setState(442); + setState(439); match(RBRACE); } break; @@ -3215,11 
+3196,11 @@ public final MaptokenContext maptoken() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(445); + setState(442); expression(0); - setState(446); + setState(443); match(COLON); - setState(447); + setState(444); expression(0); } } @@ -3266,34 +3247,34 @@ public final ArgumentsContext arguments() throws RecognitionException { enterOuterAlt(_localctx, 1); { { - setState(449); + setState(446); match(LP); - setState(458); + setState(455); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << THIS) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(450); + setState(447); argument(); - setState(455); + setState(452); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(451); + setState(448); match(COMMA); - setState(452); + setState(449); argument(); } } - setState(457); + setState(454); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(460); + setState(457); match(RP); } } @@ -3334,27 +3315,27 @@ public final ArgumentContext argument() throws RecognitionException { ArgumentContext _localctx = new ArgumentContext(_ctx, getState()); enterRule(_localctx, 58, RULE_argument); try { - setState(465); + setState(462); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,44,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,43,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(462); + setState(459); expression(0); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(463); + setState(460); lambda(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(464); + setState(461); funcref(); } break; @@ -3409,58 +3390,58 @@ public final LambdaContext lambda() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(480); + setState(477); switch (_input.LA(1)) { case TYPE: case ID: { - setState(467); + setState(464); lamtype(); } break; case LP: { - setState(468); + setState(465); match(LP); - setState(477); + setState(474); _la = _input.LA(1); if (_la==TYPE || _la==ID) { { - setState(469); + setState(466); lamtype(); - setState(474); + setState(471); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(470); + setState(467); match(COMMA); - setState(471); + setState(468); lamtype(); } } - setState(476); + setState(473); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(479); + setState(476); match(RP); } break; default: throw new NoViableAltException(this); } - setState(482); + setState(479); match(ARROW); - setState(485); + setState(482); switch (_input.LA(1)) { case LBRACK: { - setState(483); + setState(480); block(); } break; @@ -3485,7 +3466,7 @@ public final LambdaContext lambda() throws RecognitionException { case TYPE: case ID: { - setState(484); + setState(481); expression(0); } break; @@ -3528,16 +3509,16 @@ public final LamtypeContext lamtype() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(488); + setState(485); _la = _input.LA(1); if (_la==TYPE) { { - setState(487); + setState(484); decltype(); } } - setState(490); + setState(487); match(ID); } } @@ 
-3557,7 +3538,7 @@ public FuncrefContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_funcref; } - + public FuncrefContext() { } public void copyFrom(FuncrefContext ctx) { super.copyFrom(ctx); @@ -3616,18 +3597,18 @@ public final FuncrefContext funcref() throws RecognitionException { FuncrefContext _localctx = new FuncrefContext(_ctx, getState()); enterRule(_localctx, 64, RULE_funcref); try { - setState(505); + setState(502); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,50,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,49,_ctx) ) { case 1: _localctx = new ClassfuncrefContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(492); + setState(489); match(TYPE); - setState(493); + setState(490); match(REF); - setState(494); + setState(491); match(ID); } break; @@ -3635,11 +3616,11 @@ public final FuncrefContext funcref() throws RecognitionException { _localctx = new ConstructorfuncrefContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(495); + setState(492); decltype(); - setState(496); + setState(493); match(REF); - setState(497); + setState(494); match(NEW); } break; @@ -3647,11 +3628,11 @@ public final FuncrefContext funcref() throws RecognitionException { _localctx = new CapturingfuncrefContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(499); + setState(496); match(ID); - setState(500); + setState(497); match(REF); - setState(501); + setState(498); match(ID); } break; @@ -3659,11 +3640,11 @@ public final FuncrefContext funcref() throws RecognitionException { _localctx = new LocalfuncrefContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(502); + setState(499); match(THIS); - setState(503); + setState(500); match(REF); - setState(504); + setState(501); match(ID); } break; @@ -3733,200 +3714,198 @@ private boolean expression_sempred(ExpressionContext _localctx, int predIndex) { } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3V\u01fe\4\2\t\2\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3V\u01fb\4\2\t\2\4"+ "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ "\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!"+ "\t!\4\"\t\"\3\2\7\2F\n\2\f\2\16\2I\13\2\3\2\7\2L\n\2\f\2\16\2O\13\2\3"+ - "\2\5\2R\n\2\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7"+ - "\4b\n\4\f\4\16\4e\13\4\5\4g\n\4\3\4\3\4\3\5\3\5\3\5\3\5\5\5o\n\5\3\6\3"+ - "\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6y\n\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6\u0081"+ - "\n\6\3\6\3\6\3\6\5\6\u0086\n\6\3\6\3\6\5\6\u008a\n\6\3\6\3\6\5\6\u008e"+ - "\n\6\3\6\3\6\3\6\5\6\u0093\n\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6"+ - "\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\6\6\u00a9\n\6\r\6\16\6\u00aa"+ - "\5\6\u00ad\n\6\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7"+ - "\3\7\5\7\u00be\n\7\3\b\3\b\5\b\u00c2\n\b\3\t\3\t\7\t\u00c6\n\t\f\t\16"+ - "\t\u00c9\13\t\3\t\5\t\u00cc\n\t\3\t\3\t\3\n\3\n\3\13\3\13\5\13\u00d4\n"+ - "\13\3\f\3\f\3\r\3\r\3\r\3\r\7\r\u00dc\n\r\f\r\16\r\u00df\13\r\3\16\3\16"+ - "\3\16\7\16\u00e4\n\16\f\16\16\16\u00e7\13\16\3\17\3\17\3\17\5\17\u00ec"+ - "\n\17\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21"+ - "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + 
"\2\3\2\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4_\n\4\f\4\16"+ + "\4b\13\4\5\4d\n\4\3\4\3\4\3\5\3\5\3\5\3\5\5\5l\n\5\3\6\3\6\3\6\3\6\3\6"+ + "\3\6\3\6\3\6\5\6v\n\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6~\n\6\3\6\3\6\3\6\5\6"+ + "\u0083\n\6\3\6\3\6\5\6\u0087\n\6\3\6\3\6\5\6\u008b\n\6\3\6\3\6\3\6\5\6"+ + "\u0090\n\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6"+ + "\3\6\3\6\3\6\3\6\3\6\6\6\u00a6\n\6\r\6\16\6\u00a7\5\6\u00aa\n\6\3\7\3"+ + "\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u00bb\n\7\3"+ + "\b\3\b\5\b\u00bf\n\b\3\t\3\t\7\t\u00c3\n\t\f\t\16\t\u00c6\13\t\3\t\5\t"+ + "\u00c9\n\t\3\t\3\t\3\n\3\n\3\13\3\13\5\13\u00d1\n\13\3\f\3\f\3\r\3\r\3"+ + "\r\3\r\7\r\u00d9\n\r\f\r\16\r\u00dc\13\r\3\16\3\16\3\16\7\16\u00e1\n\16"+ + "\f\16\16\16\u00e4\13\16\3\17\3\17\3\17\5\17\u00e9\n\17\3\20\3\20\3\20"+ + "\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ - "\3\21\3\21\3\21\7\21\u0128\n\21\f\21\16\21\u012b\13\21\3\22\3\22\3\22"+ - "\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\5\22\u013a\n\22\3\23"+ - "\3\23\7\23\u013e\n\23\f\23\16\23\u0141\13\23\3\23\3\23\3\23\7\23\u0146"+ - "\n\23\f\23\16\23\u0149\13\23\3\23\5\23\u014c\n\23\3\24\3\24\3\24\3\24"+ - "\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24"+ - "\5\24\u0160\n\24\3\25\3\25\3\25\5\25\u0165\n\25\3\26\3\26\5\26\u0169\n"+ - "\26\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\32\3\32\3"+ - "\32\3\32\3\32\3\32\6\32\u017c\n\32\r\32\16\32\u017d\3\32\3\32\7\32\u0182"+ - "\n\32\f\32\16\32\u0185\13\32\5\32\u0187\n\32\3\32\3\32\3\32\3\32\3\32"+ - "\3\32\3\32\3\32\7\32\u0191\n\32\f\32\16\32\u0194\13\32\5\32\u0196\n\32"+ - "\3\32\3\32\7\32\u019a\n\32\f\32\16\32\u019d\13\32\5\32\u019f\n\32\3\33"+ - "\3\33\3\33\3\33\7\33\u01a5\n\33\f\33\16\33\u01a8\13\33\3\33\3\33\3\33"+ - "\3\33\5\33\u01ae\n\33\3\34\3\34\3\34\3\34\7\34\u01b4\n\34\f\34\16\34\u01b7"+ - "\13\34\3\34\3\34\3\34\3\34\3\34\5\34\u01be\n\34\3\35\3\35\3\35\3\35\3"+ - "\36\3\36\3\36\3\36\7\36\u01c8\n\36\f\36\16\36\u01cb\13\36\5\36\u01cd\n"+ - "\36\3\36\3\36\3\37\3\37\3\37\5\37\u01d4\n\37\3 \3 \3 \3 \3 \7 \u01db\n"+ - " \f \16 \u01de\13 \5 \u01e0\n \3 \5 \u01e3\n \3 \3 \3 \5 \u01e8\n \3!"+ - "\5!\u01eb\n!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\""+ - "\5\"\u01fc\n\"\3\"\2\3 #\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&("+ - "*,.\60\62\64\668:<>@B\2\16\3\2 \"\3\2#$\3\2:;\3\2%\'\3\2(+\3\2,/\3\2>"+ - "I\3\2<=\4\2\36\37#$\3\2JM\3\2\13\f\3\2UV\u0237\2G\3\2\2\2\4U\3\2\2\2\6"+ - "Z\3\2\2\2\bn\3\2\2\2\n\u00ac\3\2\2\2\f\u00bd\3\2\2\2\16\u00c1\3\2\2\2"+ - "\20\u00c3\3\2\2\2\22\u00cf\3\2\2\2\24\u00d3\3\2\2\2\26\u00d5\3\2\2\2\30"+ - "\u00d7\3\2\2\2\32\u00e0\3\2\2\2\34\u00e8\3\2\2\2\36\u00ed\3\2\2\2 \u00f4"+ - "\3\2\2\2\"\u0139\3\2\2\2$\u014b\3\2\2\2&\u015f\3\2\2\2(\u0164\3\2\2\2"+ - "*\u0168\3\2\2\2,\u016a\3\2\2\2.\u016e\3\2\2\2\60\u0171\3\2\2\2\62\u019e"+ - "\3\2\2\2\64\u01ad\3\2\2\2\66\u01bd\3\2\2\28\u01bf\3\2\2\2:\u01c3\3\2\2"+ - "\2<\u01d3\3\2\2\2>\u01e2\3\2\2\2@\u01ea\3\2\2\2B\u01fb\3\2\2\2DF\5\4\3"+ - "\2ED\3\2\2\2FI\3\2\2\2GE\3\2\2\2GH\3\2\2\2HM\3\2\2\2IG\3\2\2\2JL\5\b\5"+ - "\2KJ\3\2\2\2LO\3\2\2\2MK\3\2\2\2MN\3\2\2\2NQ\3\2\2\2OM\3\2\2\2PR\5\f\7"+ - "\2QP\3\2\2\2QR\3\2\2\2RS\3\2\2\2ST\7\2\2\3T\3\3\2\2\2UV\5\32\16\2VW\7"+ - "T\2\2WX\5\6\4\2XY\5\20\t\2Y\5\3\2\2\2Zf\7\t\2\2[\\\5\32\16\2\\c\7T\2\2"+ - 
"]^\7\r\2\2^_\5\32\16\2_`\7T\2\2`b\3\2\2\2a]\3\2\2\2be\3\2\2\2ca\3\2\2"+ - "\2cd\3\2\2\2dg\3\2\2\2ec\3\2\2\2f[\3\2\2\2fg\3\2\2\2gh\3\2\2\2hi\7\n\2"+ - "\2i\7\3\2\2\2jo\5\n\6\2kl\5\f\7\2lm\7\16\2\2mo\3\2\2\2nj\3\2\2\2nk\3\2"+ - "\2\2o\t\3\2\2\2pq\7\17\2\2qr\7\t\2\2rs\5 \21\2st\7\n\2\2tx\5\16\b\2uv"+ - "\7\21\2\2vy\5\16\b\2wy\6\6\2\2xu\3\2\2\2xw\3\2\2\2y\u00ad\3\2\2\2z{\7"+ - "\22\2\2{|\7\t\2\2|}\5 \21\2}\u0080\7\n\2\2~\u0081\5\16\b\2\177\u0081\5"+ - "\22\n\2\u0080~\3\2\2\2\u0080\177\3\2\2\2\u0081\u00ad\3\2\2\2\u0082\u0083"+ - "\7\24\2\2\u0083\u0085\7\t\2\2\u0084\u0086\5\24\13\2\u0085\u0084\3\2\2"+ - "\2\u0085\u0086\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u0089\7\16\2\2\u0088"+ - "\u008a\5 \21\2\u0089\u0088\3\2\2\2\u0089\u008a\3\2\2\2\u008a\u008b\3\2"+ - "\2\2\u008b\u008d\7\16\2\2\u008c\u008e\5\26\f\2\u008d\u008c\3\2\2\2\u008d"+ - "\u008e\3\2\2\2\u008e\u008f\3\2\2\2\u008f\u0092\7\n\2\2\u0090\u0093\5\16"+ - "\b\2\u0091\u0093\5\22\n\2\u0092\u0090\3\2\2\2\u0092\u0091\3\2\2\2\u0093"+ - "\u00ad\3\2\2\2\u0094\u0095\7\24\2\2\u0095\u0096\7\t\2\2\u0096\u0097\5"+ - "\32\16\2\u0097\u0098\7T\2\2\u0098\u0099\7\66\2\2\u0099\u009a\5 \21\2\u009a"+ - "\u009b\7\n\2\2\u009b\u009c\5\16\b\2\u009c\u00ad\3\2\2\2\u009d\u009e\7"+ - "\24\2\2\u009e\u009f\7\t\2\2\u009f\u00a0\7T\2\2\u00a0\u00a1\7\20\2\2\u00a1"+ - "\u00a2\5 \21\2\u00a2\u00a3\7\n\2\2\u00a3\u00a4\5\16\b\2\u00a4\u00ad\3"+ - "\2\2\2\u00a5\u00a6\7\31\2\2\u00a6\u00a8\5\20\t\2\u00a7\u00a9\5\36\20\2"+ - "\u00a8\u00a7\3\2\2\2\u00a9\u00aa\3\2\2\2\u00aa\u00a8\3\2\2\2\u00aa\u00ab"+ - "\3\2\2\2\u00ab\u00ad\3\2\2\2\u00acp\3\2\2\2\u00acz\3\2\2\2\u00ac\u0082"+ - "\3\2\2\2\u00ac\u0094\3\2\2\2\u00ac\u009d\3\2\2\2\u00ac\u00a5\3\2\2\2\u00ad"+ - "\13\3\2\2\2\u00ae\u00af\7\23\2\2\u00af\u00b0\5\20\t\2\u00b0\u00b1\7\22"+ - "\2\2\u00b1\u00b2\7\t\2\2\u00b2\u00b3\5 \21\2\u00b3\u00b4\7\n\2\2\u00b4"+ - "\u00be\3\2\2\2\u00b5\u00be\5\30\r\2\u00b6\u00be\7\25\2\2\u00b7\u00be\7"+ - "\26\2\2\u00b8\u00b9\7\27\2\2\u00b9\u00be\5 \21\2\u00ba\u00bb\7\33\2\2"+ - "\u00bb\u00be\5 \21\2\u00bc\u00be\5 \21\2\u00bd\u00ae\3\2\2\2\u00bd\u00b5"+ - "\3\2\2\2\u00bd\u00b6\3\2\2\2\u00bd\u00b7\3\2\2\2\u00bd\u00b8\3\2\2\2\u00bd"+ - "\u00ba\3\2\2\2\u00bd\u00bc\3\2\2\2\u00be\r\3\2\2\2\u00bf\u00c2\5\20\t"+ - "\2\u00c0\u00c2\5\b\5\2\u00c1\u00bf\3\2\2\2\u00c1\u00c0\3\2\2\2\u00c2\17"+ - "\3\2\2\2\u00c3\u00c7\7\5\2\2\u00c4\u00c6\5\b\5\2\u00c5\u00c4\3\2\2\2\u00c6"+ - "\u00c9\3\2\2\2\u00c7\u00c5\3\2\2\2\u00c7\u00c8\3\2\2\2\u00c8\u00cb\3\2"+ - "\2\2\u00c9\u00c7\3\2\2\2\u00ca\u00cc\5\f\7\2\u00cb\u00ca\3\2\2\2\u00cb"+ - "\u00cc\3\2\2\2\u00cc\u00cd\3\2\2\2\u00cd\u00ce\7\6\2\2\u00ce\21\3\2\2"+ - "\2\u00cf\u00d0\7\16\2\2\u00d0\23\3\2\2\2\u00d1\u00d4\5\30\r\2\u00d2\u00d4"+ - "\5 \21\2\u00d3\u00d1\3\2\2\2\u00d3\u00d2\3\2\2\2\u00d4\25\3\2\2\2\u00d5"+ - "\u00d6\5 \21\2\u00d6\27\3\2\2\2\u00d7\u00d8\5\32\16\2\u00d8\u00dd\5\34"+ - "\17\2\u00d9\u00da\7\r\2\2\u00da\u00dc\5\34\17\2\u00db\u00d9\3\2\2\2\u00dc"+ - "\u00df\3\2\2\2\u00dd\u00db\3\2\2\2\u00dd\u00de\3\2\2\2\u00de\31\3\2\2"+ - "\2\u00df\u00dd\3\2\2\2\u00e0\u00e5\7S\2\2\u00e1\u00e2\7\7\2\2\u00e2\u00e4"+ - "\7\b\2\2\u00e3\u00e1\3\2\2\2\u00e4\u00e7\3\2\2\2\u00e5\u00e3\3\2\2\2\u00e5"+ - "\u00e6\3\2\2\2\u00e6\33\3\2\2\2\u00e7\u00e5\3\2\2\2\u00e8\u00eb\7T\2\2"+ - "\u00e9\u00ea\7>\2\2\u00ea\u00ec\5 \21\2\u00eb\u00e9\3\2\2\2\u00eb\u00ec"+ - "\3\2\2\2\u00ec\35\3\2\2\2\u00ed\u00ee\7\32\2\2\u00ee\u00ef\7\t\2\2\u00ef"+ - "\u00f0\7S\2\2\u00f0\u00f1\7T\2\2\u00f1\u00f2\7\n\2\2\u00f2\u00f3\5\20"+ - "\t\2\u00f3\37\3\2\2\2\u00f4\u00f5\b\21\1\2\u00f5\u00f6\5\"\22\2\u00f6"+ - 
"\u0129\3\2\2\2\u00f7\u00f8\f\21\2\2\u00f8\u00f9\t\2\2\2\u00f9\u0128\5"+ - " \21\22\u00fa\u00fb\f\20\2\2\u00fb\u00fc\t\3\2\2\u00fc\u0128\5 \21\21"+ - "\u00fd\u00fe\f\17\2\2\u00fe\u00ff\t\4\2\2\u00ff\u0128\5 \21\20\u0100\u0101"+ - "\f\16\2\2\u0101\u0102\t\5\2\2\u0102\u0128\5 \21\17\u0103\u0104\f\r\2\2"+ - "\u0104\u0105\t\6\2\2\u0105\u0128\5 \21\16\u0106\u0107\f\13\2\2\u0107\u0108"+ - "\t\7\2\2\u0108\u0128\5 \21\f\u0109\u010a\f\n\2\2\u010a\u010b\7\60\2\2"+ - "\u010b\u0128\5 \21\13\u010c\u010d\f\t\2\2\u010d\u010e\7\61\2\2\u010e\u0128"+ - "\5 \21\n\u010f\u0110\f\b\2\2\u0110\u0111\7\62\2\2\u0111\u0128\5 \21\t"+ - "\u0112\u0113\f\7\2\2\u0113\u0114\7\63\2\2\u0114\u0128\5 \21\b\u0115\u0116"+ - "\f\6\2\2\u0116\u0117\7\64\2\2\u0117\u0128\5 \21\7\u0118\u0119\f\5\2\2"+ - "\u0119\u011a\7\65\2\2\u011a\u011b\5 \21\2\u011b\u011c\7\66\2\2\u011c\u011d"+ - "\5 \21\5\u011d\u0128\3\2\2\2\u011e\u011f\f\4\2\2\u011f\u0120\7\67\2\2"+ - "\u0120\u0128\5 \21\4\u0121\u0122\f\3\2\2\u0122\u0123\t\b\2\2\u0123\u0128"+ - "\5 \21\3\u0124\u0125\f\f\2\2\u0125\u0126\7\35\2\2\u0126\u0128\5\32\16"+ - "\2\u0127\u00f7\3\2\2\2\u0127\u00fa\3\2\2\2\u0127\u00fd\3\2\2\2\u0127\u0100"+ - "\3\2\2\2\u0127\u0103\3\2\2\2\u0127\u0106\3\2\2\2\u0127\u0109\3\2\2\2\u0127"+ - "\u010c\3\2\2\2\u0127\u010f\3\2\2\2\u0127\u0112\3\2\2\2\u0127\u0115\3\2"+ - "\2\2\u0127\u0118\3\2\2\2\u0127\u011e\3\2\2\2\u0127\u0121\3\2\2\2\u0127"+ - "\u0124\3\2\2\2\u0128\u012b\3\2\2\2\u0129\u0127\3\2\2\2\u0129\u012a\3\2"+ - "\2\2\u012a!\3\2\2\2\u012b\u0129\3\2\2\2\u012c\u012d\t\t\2\2\u012d\u013a"+ - "\5$\23\2\u012e\u012f\5$\23\2\u012f\u0130\t\t\2\2\u0130\u013a\3\2\2\2\u0131"+ - "\u013a\5$\23\2\u0132\u0133\t\n\2\2\u0133\u013a\5\"\22\2\u0134\u0135\7"+ - "\t\2\2\u0135\u0136\5\32\16\2\u0136\u0137\7\n\2\2\u0137\u0138\5\"\22\2"+ - "\u0138\u013a\3\2\2\2\u0139\u012c\3\2\2\2\u0139\u012e\3\2\2\2\u0139\u0131"+ - "\3\2\2\2\u0139\u0132\3\2\2\2\u0139\u0134\3\2\2\2\u013a#\3\2\2\2\u013b"+ - "\u013f\5&\24\2\u013c\u013e\5(\25\2\u013d\u013c\3\2\2\2\u013e\u0141\3\2"+ - "\2\2\u013f\u013d\3\2\2\2\u013f\u0140\3\2\2\2\u0140\u014c\3\2\2\2\u0141"+ - "\u013f\3\2\2\2\u0142\u0143\5\32\16\2\u0143\u0147\5*\26\2\u0144\u0146\5"+ - "(\25\2\u0145\u0144\3\2\2\2\u0146\u0149\3\2\2\2\u0147\u0145\3\2\2\2\u0147"+ - "\u0148\3\2\2\2\u0148\u014c\3\2\2\2\u0149\u0147\3\2\2\2\u014a\u014c\5\62"+ - "\32\2\u014b\u013b\3\2\2\2\u014b\u0142\3\2\2\2\u014b\u014a\3\2\2\2\u014c"+ - "%\3\2\2\2\u014d\u014e\7\t\2\2\u014e\u014f\5 \21\2\u014f\u0150\7\n\2\2"+ - "\u0150\u0160\3\2\2\2\u0151\u0160\t\13\2\2\u0152\u0160\7P\2\2\u0153\u0160"+ - "\7Q\2\2\u0154\u0160\7R\2\2\u0155\u0160\7N\2\2\u0156\u0160\7O\2\2\u0157"+ - "\u0160\5\64\33\2\u0158\u0160\5\66\34\2\u0159\u0160\7T\2\2\u015a\u015b"+ - "\7T\2\2\u015b\u0160\5:\36\2\u015c\u015d\7\30\2\2\u015d\u015e\7S\2\2\u015e"+ - "\u0160\5:\36\2\u015f\u014d\3\2\2\2\u015f\u0151\3\2\2\2\u015f\u0152\3\2"+ - "\2\2\u015f\u0153\3\2\2\2\u015f\u0154\3\2\2\2\u015f\u0155\3\2\2\2\u015f"+ - "\u0156\3\2\2\2\u015f\u0157\3\2\2\2\u015f\u0158\3\2\2\2\u015f\u0159\3\2"+ - "\2\2\u015f\u015a\3\2\2\2\u015f\u015c\3\2\2\2\u0160\'\3\2\2\2\u0161\u0165"+ - "\5,\27\2\u0162\u0165\5.\30\2\u0163\u0165\5\60\31\2\u0164\u0161\3\2\2\2"+ - "\u0164\u0162\3\2\2\2\u0164\u0163\3\2\2\2\u0165)\3\2\2\2\u0166\u0169\5"+ - ",\27\2\u0167\u0169\5.\30\2\u0168\u0166\3\2\2\2\u0168\u0167\3\2\2\2\u0169"+ - "+\3\2\2\2\u016a\u016b\t\f\2\2\u016b\u016c\7V\2\2\u016c\u016d\5:\36\2\u016d"+ - "-\3\2\2\2\u016e\u016f\t\f\2\2\u016f\u0170\t\r\2\2\u0170/\3\2\2\2\u0171"+ - "\u0172\7\7\2\2\u0172\u0173\5 \21\2\u0173\u0174\7\b\2\2\u0174\61\3\2\2"+ - 
"\2\u0175\u0176\7\30\2\2\u0176\u017b\7S\2\2\u0177\u0178\7\7\2\2\u0178\u0179"+ - "\5 \21\2\u0179\u017a\7\b\2\2\u017a\u017c\3\2\2\2\u017b\u0177\3\2\2\2\u017c"+ - "\u017d\3\2\2\2\u017d\u017b\3\2\2\2\u017d\u017e\3\2\2\2\u017e\u0186\3\2"+ - "\2\2\u017f\u0183\5*\26\2\u0180\u0182\5(\25\2\u0181\u0180\3\2\2\2\u0182"+ - "\u0185\3\2\2\2\u0183\u0181\3\2\2\2\u0183\u0184\3\2\2\2\u0184\u0187\3\2"+ - "\2\2\u0185\u0183\3\2\2\2\u0186\u017f\3\2\2\2\u0186\u0187\3\2\2\2\u0187"+ - "\u019f\3\2\2\2\u0188\u0189\7\30\2\2\u0189\u018a\7S\2\2\u018a\u018b\7\7"+ - "\2\2\u018b\u018c\7\b\2\2\u018c\u0195\7\5\2\2\u018d\u0192\5 \21\2\u018e"+ - "\u018f\7\r\2\2\u018f\u0191\5 \21\2\u0190\u018e\3\2\2\2\u0191\u0194\3\2"+ - "\2\2\u0192\u0190\3\2\2\2\u0192\u0193\3\2\2\2\u0193\u0196\3\2\2\2\u0194"+ - "\u0192\3\2\2\2\u0195\u018d\3\2\2\2\u0195\u0196\3\2\2\2\u0196\u0197\3\2"+ - "\2\2\u0197\u019b\7\6\2\2\u0198\u019a\5(\25\2\u0199\u0198\3\2\2\2\u019a"+ - "\u019d\3\2\2\2\u019b\u0199\3\2\2\2\u019b\u019c\3\2\2\2\u019c\u019f\3\2"+ - "\2\2\u019d\u019b\3\2\2\2\u019e\u0175\3\2\2\2\u019e\u0188\3\2\2\2\u019f"+ - "\63\3\2\2\2\u01a0\u01a1\7\7\2\2\u01a1\u01a6\5 \21\2\u01a2\u01a3\7\r\2"+ - "\2\u01a3\u01a5\5 \21\2\u01a4\u01a2\3\2\2\2\u01a5\u01a8\3\2\2\2\u01a6\u01a4"+ - "\3\2\2\2\u01a6\u01a7\3\2\2\2\u01a7\u01a9\3\2\2\2\u01a8\u01a6\3\2\2\2\u01a9"+ - "\u01aa\7\b\2\2\u01aa\u01ae\3\2\2\2\u01ab\u01ac\7\7\2\2\u01ac\u01ae\7\b"+ - "\2\2\u01ad\u01a0\3\2\2\2\u01ad\u01ab\3\2\2\2\u01ae\65\3\2\2\2\u01af\u01b0"+ - "\7\7\2\2\u01b0\u01b5\58\35\2\u01b1\u01b2\7\r\2\2\u01b2\u01b4\58\35\2\u01b3"+ - "\u01b1\3\2\2\2\u01b4\u01b7\3\2\2\2\u01b5\u01b3\3\2\2\2\u01b5\u01b6\3\2"+ - "\2\2\u01b6\u01b8\3\2\2\2\u01b7\u01b5\3\2\2\2\u01b8\u01b9\7\b\2\2\u01b9"+ - "\u01be\3\2\2\2\u01ba\u01bb\7\7\2\2\u01bb\u01bc\7\66\2\2\u01bc\u01be\7"+ - "\b\2\2\u01bd\u01af\3\2\2\2\u01bd\u01ba\3\2\2\2\u01be\67\3\2\2\2\u01bf"+ - "\u01c0\5 \21\2\u01c0\u01c1\7\66\2\2\u01c1\u01c2\5 \21\2\u01c29\3\2\2\2"+ - "\u01c3\u01cc\7\t\2\2\u01c4\u01c9\5<\37\2\u01c5\u01c6\7\r\2\2\u01c6\u01c8"+ - "\5<\37\2\u01c7\u01c5\3\2\2\2\u01c8\u01cb\3\2\2\2\u01c9\u01c7\3\2\2\2\u01c9"+ - "\u01ca\3\2\2\2\u01ca\u01cd\3\2\2\2\u01cb\u01c9\3\2\2\2\u01cc\u01c4\3\2"+ - "\2\2\u01cc\u01cd\3\2\2\2\u01cd\u01ce\3\2\2\2\u01ce\u01cf\7\n\2\2\u01cf"+ - ";\3\2\2\2\u01d0\u01d4\5 \21\2\u01d1\u01d4\5> \2\u01d2\u01d4\5B\"\2\u01d3"+ - "\u01d0\3\2\2\2\u01d3\u01d1\3\2\2\2\u01d3\u01d2\3\2\2\2\u01d4=\3\2\2\2"+ - "\u01d5\u01e3\5@!\2\u01d6\u01df\7\t\2\2\u01d7\u01dc\5@!\2\u01d8\u01d9\7"+ - "\r\2\2\u01d9\u01db\5@!\2\u01da\u01d8\3\2\2\2\u01db\u01de\3\2\2\2\u01dc"+ - "\u01da\3\2\2\2\u01dc\u01dd\3\2\2\2\u01dd\u01e0\3\2\2\2\u01de\u01dc\3\2"+ - "\2\2\u01df\u01d7\3\2\2\2\u01df\u01e0\3\2\2\2\u01e0\u01e1\3\2\2\2\u01e1"+ - "\u01e3\7\n\2\2\u01e2\u01d5\3\2\2\2\u01e2\u01d6\3\2\2\2\u01e3\u01e4\3\2"+ - "\2\2\u01e4\u01e7\79\2\2\u01e5\u01e8\5\20\t\2\u01e6\u01e8\5 \21\2\u01e7"+ - "\u01e5\3\2\2\2\u01e7\u01e6\3\2\2\2\u01e8?\3\2\2\2\u01e9\u01eb\5\32\16"+ - "\2\u01ea\u01e9\3\2\2\2\u01ea\u01eb\3\2\2\2\u01eb\u01ec\3\2\2\2\u01ec\u01ed"+ - "\7T\2\2\u01edA\3\2\2\2\u01ee\u01ef\7S\2\2\u01ef\u01f0\78\2\2\u01f0\u01fc"+ - "\7T\2\2\u01f1\u01f2\5\32\16\2\u01f2\u01f3\78\2\2\u01f3\u01f4\7\30\2\2"+ - "\u01f4\u01fc\3\2\2\2\u01f5\u01f6\7T\2\2\u01f6\u01f7\78\2\2\u01f7\u01fc"+ - "\7T\2\2\u01f8\u01f9\7\34\2\2\u01f9\u01fa\78\2\2\u01fa\u01fc\7T\2\2\u01fb"+ - "\u01ee\3\2\2\2\u01fb\u01f1\3\2\2\2\u01fb\u01f5\3\2\2\2\u01fb\u01f8\3\2"+ - "\2\2\u01fcC\3\2\2\2\65GMQcfnx\u0080\u0085\u0089\u008d\u0092\u00aa\u00ac"+ - "\u00bd\u00c1\u00c7\u00cb\u00d3\u00dd\u00e5\u00eb\u0127\u0129\u0139\u013f"+ - 
"\u0147\u014b\u015f\u0164\u0168\u017d\u0183\u0186\u0192\u0195\u019b\u019e"+ - "\u01a6\u01ad\u01b5\u01bd\u01c9\u01cc\u01d3\u01dc\u01df\u01e2\u01e7\u01ea"+ - "\u01fb"; + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\7\21"+ + "\u0125\n\21\f\21\16\21\u0128\13\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22"+ + "\3\22\3\22\3\22\3\22\3\22\3\22\5\22\u0137\n\22\3\23\3\23\7\23\u013b\n"+ + "\23\f\23\16\23\u013e\13\23\3\23\3\23\3\23\7\23\u0143\n\23\f\23\16\23\u0146"+ + "\13\23\3\23\5\23\u0149\n\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3"+ + "\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\5\24\u015d\n\24\3\25"+ + "\3\25\3\25\5\25\u0162\n\25\3\26\3\26\5\26\u0166\n\26\3\27\3\27\3\27\3"+ + "\27\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\6"+ + "\32\u0179\n\32\r\32\16\32\u017a\3\32\3\32\7\32\u017f\n\32\f\32\16\32\u0182"+ + "\13\32\5\32\u0184\n\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\7\32\u018e"+ + "\n\32\f\32\16\32\u0191\13\32\5\32\u0193\n\32\3\32\3\32\7\32\u0197\n\32"+ + "\f\32\16\32\u019a\13\32\5\32\u019c\n\32\3\33\3\33\3\33\3\33\7\33\u01a2"+ + "\n\33\f\33\16\33\u01a5\13\33\3\33\3\33\3\33\3\33\5\33\u01ab\n\33\3\34"+ + "\3\34\3\34\3\34\7\34\u01b1\n\34\f\34\16\34\u01b4\13\34\3\34\3\34\3\34"+ + "\3\34\3\34\5\34\u01bb\n\34\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36\7\36"+ + "\u01c5\n\36\f\36\16\36\u01c8\13\36\5\36\u01ca\n\36\3\36\3\36\3\37\3\37"+ + "\3\37\5\37\u01d1\n\37\3 \3 \3 \3 \3 \7 \u01d8\n \f \16 \u01db\13 \5 \u01dd"+ + "\n \3 \5 \u01e0\n \3 \3 \3 \5 \u01e5\n \3!\5!\u01e8\n!\3!\3!\3\"\3\"\3"+ + "\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\5\"\u01f9\n\"\3\"\2\3 #\2\4"+ + "\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@B\2\17\3"+ + "\3\16\16\3\2 \"\3\2#$\3\2:;\3\2%\'\3\2(+\3\2,/\3\2>I\3\2<=\4\2\36\37#"+ + "$\3\2JM\3\2\13\f\3\2UV\u0233\2G\3\2\2\2\4R\3\2\2\2\6W\3\2\2\2\bk\3\2\2"+ + "\2\n\u00a9\3\2\2\2\f\u00ba\3\2\2\2\16\u00be\3\2\2\2\20\u00c0\3\2\2\2\22"+ + "\u00cc\3\2\2\2\24\u00d0\3\2\2\2\26\u00d2\3\2\2\2\30\u00d4\3\2\2\2\32\u00dd"+ + "\3\2\2\2\34\u00e5\3\2\2\2\36\u00ea\3\2\2\2 \u00f1\3\2\2\2\"\u0136\3\2"+ + "\2\2$\u0148\3\2\2\2&\u015c\3\2\2\2(\u0161\3\2\2\2*\u0165\3\2\2\2,\u0167"+ + "\3\2\2\2.\u016b\3\2\2\2\60\u016e\3\2\2\2\62\u019b\3\2\2\2\64\u01aa\3\2"+ + "\2\2\66\u01ba\3\2\2\28\u01bc\3\2\2\2:\u01c0\3\2\2\2<\u01d0\3\2\2\2>\u01df"+ + "\3\2\2\2@\u01e7\3\2\2\2B\u01f8\3\2\2\2DF\5\4\3\2ED\3\2\2\2FI\3\2\2\2G"+ + "E\3\2\2\2GH\3\2\2\2HM\3\2\2\2IG\3\2\2\2JL\5\b\5\2KJ\3\2\2\2LO\3\2\2\2"+ + "MK\3\2\2\2MN\3\2\2\2NP\3\2\2\2OM\3\2\2\2PQ\7\2\2\3Q\3\3\2\2\2RS\5\32\16"+ + "\2ST\7T\2\2TU\5\6\4\2UV\5\20\t\2V\5\3\2\2\2Wc\7\t\2\2XY\5\32\16\2Y`\7"+ + "T\2\2Z[\7\r\2\2[\\\5\32\16\2\\]\7T\2\2]_\3\2\2\2^Z\3\2\2\2_b\3\2\2\2`"+ + "^\3\2\2\2`a\3\2\2\2ad\3\2\2\2b`\3\2\2\2cX\3\2\2\2cd\3\2\2\2de\3\2\2\2"+ + "ef\7\n\2\2f\7\3\2\2\2gl\5\n\6\2hi\5\f\7\2ij\t\2\2\2jl\3\2\2\2kg\3\2\2"+ + "\2kh\3\2\2\2l\t\3\2\2\2mn\7\17\2\2no\7\t\2\2op\5 \21\2pq\7\n\2\2qu\5\16"+ + "\b\2rs\7\21\2\2sv\5\16\b\2tv\6\6\2\2ur\3\2\2\2ut\3\2\2\2v\u00aa\3\2\2"+ + "\2wx\7\22\2\2xy\7\t\2\2yz\5 \21\2z}\7\n\2\2{~\5\16\b\2|~\5\22\n\2}{\3"+ + "\2\2\2}|\3\2\2\2~\u00aa\3\2\2\2\177\u0080\7\24\2\2\u0080\u0082\7\t\2\2"+ + "\u0081\u0083\5\24\13\2\u0082\u0081\3\2\2\2\u0082\u0083\3\2\2\2\u0083\u0084"+ + "\3\2\2\2\u0084\u0086\7\16\2\2\u0085\u0087\5 \21\2\u0086\u0085\3\2\2\2"+ + "\u0086\u0087\3\2\2\2\u0087\u0088\3\2\2\2\u0088\u008a\7\16\2\2\u0089\u008b"+ + "\5\26\f\2\u008a\u0089\3\2\2\2\u008a\u008b\3\2\2\2\u008b\u008c\3\2\2\2"+ + "\u008c\u008f\7\n\2\2\u008d\u0090\5\16\b\2\u008e\u0090\5\22\n\2\u008f\u008d"+ + 
"\3\2\2\2\u008f\u008e\3\2\2\2\u0090\u00aa\3\2\2\2\u0091\u0092\7\24\2\2"+ + "\u0092\u0093\7\t\2\2\u0093\u0094\5\32\16\2\u0094\u0095\7T\2\2\u0095\u0096"+ + "\7\66\2\2\u0096\u0097\5 \21\2\u0097\u0098\7\n\2\2\u0098\u0099\5\16\b\2"+ + "\u0099\u00aa\3\2\2\2\u009a\u009b\7\24\2\2\u009b\u009c\7\t\2\2\u009c\u009d"+ + "\7T\2\2\u009d\u009e\7\20\2\2\u009e\u009f\5 \21\2\u009f\u00a0\7\n\2\2\u00a0"+ + "\u00a1\5\16\b\2\u00a1\u00aa\3\2\2\2\u00a2\u00a3\7\31\2\2\u00a3\u00a5\5"+ + "\20\t\2\u00a4\u00a6\5\36\20\2\u00a5\u00a4\3\2\2\2\u00a6\u00a7\3\2\2\2"+ + "\u00a7\u00a5\3\2\2\2\u00a7\u00a8\3\2\2\2\u00a8\u00aa\3\2\2\2\u00a9m\3"+ + "\2\2\2\u00a9w\3\2\2\2\u00a9\177\3\2\2\2\u00a9\u0091\3\2\2\2\u00a9\u009a"+ + "\3\2\2\2\u00a9\u00a2\3\2\2\2\u00aa\13\3\2\2\2\u00ab\u00ac\7\23\2\2\u00ac"+ + "\u00ad\5\20\t\2\u00ad\u00ae\7\22\2\2\u00ae\u00af\7\t\2\2\u00af\u00b0\5"+ + " \21\2\u00b0\u00b1\7\n\2\2\u00b1\u00bb\3\2\2\2\u00b2\u00bb\5\30\r\2\u00b3"+ + "\u00bb\7\25\2\2\u00b4\u00bb\7\26\2\2\u00b5\u00b6\7\27\2\2\u00b6\u00bb"+ + "\5 \21\2\u00b7\u00b8\7\33\2\2\u00b8\u00bb\5 \21\2\u00b9\u00bb\5 \21\2"+ + "\u00ba\u00ab\3\2\2\2\u00ba\u00b2\3\2\2\2\u00ba\u00b3\3\2\2\2\u00ba\u00b4"+ + "\3\2\2\2\u00ba\u00b5\3\2\2\2\u00ba\u00b7\3\2\2\2\u00ba\u00b9\3\2\2\2\u00bb"+ + "\r\3\2\2\2\u00bc\u00bf\5\20\t\2\u00bd\u00bf\5\b\5\2\u00be\u00bc\3\2\2"+ + "\2\u00be\u00bd\3\2\2\2\u00bf\17\3\2\2\2\u00c0\u00c4\7\5\2\2\u00c1\u00c3"+ + "\5\b\5\2\u00c2\u00c1\3\2\2\2\u00c3\u00c6\3\2\2\2\u00c4\u00c2\3\2\2\2\u00c4"+ + "\u00c5\3\2\2\2\u00c5\u00c8\3\2\2\2\u00c6\u00c4\3\2\2\2\u00c7\u00c9\5\f"+ + "\7\2\u00c8\u00c7\3\2\2\2\u00c8\u00c9\3\2\2\2\u00c9\u00ca\3\2\2\2\u00ca"+ + "\u00cb\7\6\2\2\u00cb\21\3\2\2\2\u00cc\u00cd\7\16\2\2\u00cd\23\3\2\2\2"+ + "\u00ce\u00d1\5\30\r\2\u00cf\u00d1\5 \21\2\u00d0\u00ce\3\2\2\2\u00d0\u00cf"+ + "\3\2\2\2\u00d1\25\3\2\2\2\u00d2\u00d3\5 \21\2\u00d3\27\3\2\2\2\u00d4\u00d5"+ + "\5\32\16\2\u00d5\u00da\5\34\17\2\u00d6\u00d7\7\r\2\2\u00d7\u00d9\5\34"+ + "\17\2\u00d8\u00d6\3\2\2\2\u00d9\u00dc\3\2\2\2\u00da\u00d8\3\2\2\2\u00da"+ + "\u00db\3\2\2\2\u00db\31\3\2\2\2\u00dc\u00da\3\2\2\2\u00dd\u00e2\7S\2\2"+ + "\u00de\u00df\7\7\2\2\u00df\u00e1\7\b\2\2\u00e0\u00de\3\2\2\2\u00e1\u00e4"+ + "\3\2\2\2\u00e2\u00e0\3\2\2\2\u00e2\u00e3\3\2\2\2\u00e3\33\3\2\2\2\u00e4"+ + "\u00e2\3\2\2\2\u00e5\u00e8\7T\2\2\u00e6\u00e7\7>\2\2\u00e7\u00e9\5 \21"+ + "\2\u00e8\u00e6\3\2\2\2\u00e8\u00e9\3\2\2\2\u00e9\35\3\2\2\2\u00ea\u00eb"+ + "\7\32\2\2\u00eb\u00ec\7\t\2\2\u00ec\u00ed\7S\2\2\u00ed\u00ee\7T\2\2\u00ee"+ + "\u00ef\7\n\2\2\u00ef\u00f0\5\20\t\2\u00f0\37\3\2\2\2\u00f1\u00f2\b\21"+ + "\1\2\u00f2\u00f3\5\"\22\2\u00f3\u0126\3\2\2\2\u00f4\u00f5\f\21\2\2\u00f5"+ + "\u00f6\t\3\2\2\u00f6\u0125\5 \21\22\u00f7\u00f8\f\20\2\2\u00f8\u00f9\t"+ + "\4\2\2\u00f9\u0125\5 \21\21\u00fa\u00fb\f\17\2\2\u00fb\u00fc\t\5\2\2\u00fc"+ + "\u0125\5 \21\20\u00fd\u00fe\f\16\2\2\u00fe\u00ff\t\6\2\2\u00ff\u0125\5"+ + " \21\17\u0100\u0101\f\r\2\2\u0101\u0102\t\7\2\2\u0102\u0125\5 \21\16\u0103"+ + "\u0104\f\13\2\2\u0104\u0105\t\b\2\2\u0105\u0125\5 \21\f\u0106\u0107\f"+ + "\n\2\2\u0107\u0108\7\60\2\2\u0108\u0125\5 \21\13\u0109\u010a\f\t\2\2\u010a"+ + "\u010b\7\61\2\2\u010b\u0125\5 \21\n\u010c\u010d\f\b\2\2\u010d\u010e\7"+ + "\62\2\2\u010e\u0125\5 \21\t\u010f\u0110\f\7\2\2\u0110\u0111\7\63\2\2\u0111"+ + "\u0125\5 \21\b\u0112\u0113\f\6\2\2\u0113\u0114\7\64\2\2\u0114\u0125\5"+ + " \21\7\u0115\u0116\f\5\2\2\u0116\u0117\7\65\2\2\u0117\u0118\5 \21\2\u0118"+ + "\u0119\7\66\2\2\u0119\u011a\5 \21\5\u011a\u0125\3\2\2\2\u011b\u011c\f"+ + "\4\2\2\u011c\u011d\7\67\2\2\u011d\u0125\5 \21\4\u011e\u011f\f\3\2\2\u011f"+ + 
"\u0120\t\t\2\2\u0120\u0125\5 \21\3\u0121\u0122\f\f\2\2\u0122\u0123\7\35"+ + "\2\2\u0123\u0125\5\32\16\2\u0124\u00f4\3\2\2\2\u0124\u00f7\3\2\2\2\u0124"+ + "\u00fa\3\2\2\2\u0124\u00fd\3\2\2\2\u0124\u0100\3\2\2\2\u0124\u0103\3\2"+ + "\2\2\u0124\u0106\3\2\2\2\u0124\u0109\3\2\2\2\u0124\u010c\3\2\2\2\u0124"+ + "\u010f\3\2\2\2\u0124\u0112\3\2\2\2\u0124\u0115\3\2\2\2\u0124\u011b\3\2"+ + "\2\2\u0124\u011e\3\2\2\2\u0124\u0121\3\2\2\2\u0125\u0128\3\2\2\2\u0126"+ + "\u0124\3\2\2\2\u0126\u0127\3\2\2\2\u0127!\3\2\2\2\u0128\u0126\3\2\2\2"+ + "\u0129\u012a\t\n\2\2\u012a\u0137\5$\23\2\u012b\u012c\5$\23\2\u012c\u012d"+ + "\t\n\2\2\u012d\u0137\3\2\2\2\u012e\u0137\5$\23\2\u012f\u0130\t\13\2\2"+ + "\u0130\u0137\5\"\22\2\u0131\u0132\7\t\2\2\u0132\u0133\5\32\16\2\u0133"+ + "\u0134\7\n\2\2\u0134\u0135\5\"\22\2\u0135\u0137\3\2\2\2\u0136\u0129\3"+ + "\2\2\2\u0136\u012b\3\2\2\2\u0136\u012e\3\2\2\2\u0136\u012f\3\2\2\2\u0136"+ + "\u0131\3\2\2\2\u0137#\3\2\2\2\u0138\u013c\5&\24\2\u0139\u013b\5(\25\2"+ + "\u013a\u0139\3\2\2\2\u013b\u013e\3\2\2\2\u013c\u013a\3\2\2\2\u013c\u013d"+ + "\3\2\2\2\u013d\u0149\3\2\2\2\u013e\u013c\3\2\2\2\u013f\u0140\5\32\16\2"+ + "\u0140\u0144\5*\26\2\u0141\u0143\5(\25\2\u0142\u0141\3\2\2\2\u0143\u0146"+ + "\3\2\2\2\u0144\u0142\3\2\2\2\u0144\u0145\3\2\2\2\u0145\u0149\3\2\2\2\u0146"+ + "\u0144\3\2\2\2\u0147\u0149\5\62\32\2\u0148\u0138\3\2\2\2\u0148\u013f\3"+ + "\2\2\2\u0148\u0147\3\2\2\2\u0149%\3\2\2\2\u014a\u014b\7\t\2\2\u014b\u014c"+ + "\5 \21\2\u014c\u014d\7\n\2\2\u014d\u015d\3\2\2\2\u014e\u015d\t\f\2\2\u014f"+ + "\u015d\7P\2\2\u0150\u015d\7Q\2\2\u0151\u015d\7R\2\2\u0152\u015d\7N\2\2"+ + "\u0153\u015d\7O\2\2\u0154\u015d\5\64\33\2\u0155\u015d\5\66\34\2\u0156"+ + "\u015d\7T\2\2\u0157\u0158\7T\2\2\u0158\u015d\5:\36\2\u0159\u015a\7\30"+ + "\2\2\u015a\u015b\7S\2\2\u015b\u015d\5:\36\2\u015c\u014a\3\2\2\2\u015c"+ + "\u014e\3\2\2\2\u015c\u014f\3\2\2\2\u015c\u0150\3\2\2\2\u015c\u0151\3\2"+ + "\2\2\u015c\u0152\3\2\2\2\u015c\u0153\3\2\2\2\u015c\u0154\3\2\2\2\u015c"+ + "\u0155\3\2\2\2\u015c\u0156\3\2\2\2\u015c\u0157\3\2\2\2\u015c\u0159\3\2"+ + "\2\2\u015d\'\3\2\2\2\u015e\u0162\5,\27\2\u015f\u0162\5.\30\2\u0160\u0162"+ + "\5\60\31\2\u0161\u015e\3\2\2\2\u0161\u015f\3\2\2\2\u0161\u0160\3\2\2\2"+ + "\u0162)\3\2\2\2\u0163\u0166\5,\27\2\u0164\u0166\5.\30\2\u0165\u0163\3"+ + "\2\2\2\u0165\u0164\3\2\2\2\u0166+\3\2\2\2\u0167\u0168\t\r\2\2\u0168\u0169"+ + "\7V\2\2\u0169\u016a\5:\36\2\u016a-\3\2\2\2\u016b\u016c\t\r\2\2\u016c\u016d"+ + "\t\16\2\2\u016d/\3\2\2\2\u016e\u016f\7\7\2\2\u016f\u0170\5 \21\2\u0170"+ + "\u0171\7\b\2\2\u0171\61\3\2\2\2\u0172\u0173\7\30\2\2\u0173\u0178\7S\2"+ + "\2\u0174\u0175\7\7\2\2\u0175\u0176\5 \21\2\u0176\u0177\7\b\2\2\u0177\u0179"+ + "\3\2\2\2\u0178\u0174\3\2\2\2\u0179\u017a\3\2\2\2\u017a\u0178\3\2\2\2\u017a"+ + "\u017b\3\2\2\2\u017b\u0183\3\2\2\2\u017c\u0180\5*\26\2\u017d\u017f\5("+ + "\25\2\u017e\u017d\3\2\2\2\u017f\u0182\3\2\2\2\u0180\u017e\3\2\2\2\u0180"+ + "\u0181\3\2\2\2\u0181\u0184\3\2\2\2\u0182\u0180\3\2\2\2\u0183\u017c\3\2"+ + "\2\2\u0183\u0184\3\2\2\2\u0184\u019c\3\2\2\2\u0185\u0186\7\30\2\2\u0186"+ + "\u0187\7S\2\2\u0187\u0188\7\7\2\2\u0188\u0189\7\b\2\2\u0189\u0192\7\5"+ + "\2\2\u018a\u018f\5 \21\2\u018b\u018c\7\r\2\2\u018c\u018e\5 \21\2\u018d"+ + "\u018b\3\2\2\2\u018e\u0191\3\2\2\2\u018f\u018d\3\2\2\2\u018f\u0190\3\2"+ + "\2\2\u0190\u0193\3\2\2\2\u0191\u018f\3\2\2\2\u0192\u018a\3\2\2\2\u0192"+ + "\u0193\3\2\2\2\u0193\u0194\3\2\2\2\u0194\u0198\7\6\2\2\u0195\u0197\5("+ + "\25\2\u0196\u0195\3\2\2\2\u0197\u019a\3\2\2\2\u0198\u0196\3\2\2\2\u0198"+ + 
"\u0199\3\2\2\2\u0199\u019c\3\2\2\2\u019a\u0198\3\2\2\2\u019b\u0172\3\2"+ + "\2\2\u019b\u0185\3\2\2\2\u019c\63\3\2\2\2\u019d\u019e\7\7\2\2\u019e\u01a3"+ + "\5 \21\2\u019f\u01a0\7\r\2\2\u01a0\u01a2\5 \21\2\u01a1\u019f\3\2\2\2\u01a2"+ + "\u01a5\3\2\2\2\u01a3\u01a1\3\2\2\2\u01a3\u01a4\3\2\2\2\u01a4\u01a6\3\2"+ + "\2\2\u01a5\u01a3\3\2\2\2\u01a6\u01a7\7\b\2\2\u01a7\u01ab\3\2\2\2\u01a8"+ + "\u01a9\7\7\2\2\u01a9\u01ab\7\b\2\2\u01aa\u019d\3\2\2\2\u01aa\u01a8\3\2"+ + "\2\2\u01ab\65\3\2\2\2\u01ac\u01ad\7\7\2\2\u01ad\u01b2\58\35\2\u01ae\u01af"+ + "\7\r\2\2\u01af\u01b1\58\35\2\u01b0\u01ae\3\2\2\2\u01b1\u01b4\3\2\2\2\u01b2"+ + "\u01b0\3\2\2\2\u01b2\u01b3\3\2\2\2\u01b3\u01b5\3\2\2\2\u01b4\u01b2\3\2"+ + "\2\2\u01b5\u01b6\7\b\2\2\u01b6\u01bb\3\2\2\2\u01b7\u01b8\7\7\2\2\u01b8"+ + "\u01b9\7\66\2\2\u01b9\u01bb\7\b\2\2\u01ba\u01ac\3\2\2\2\u01ba\u01b7\3"+ + "\2\2\2\u01bb\67\3\2\2\2\u01bc\u01bd\5 \21\2\u01bd\u01be\7\66\2\2\u01be"+ + "\u01bf\5 \21\2\u01bf9\3\2\2\2\u01c0\u01c9\7\t\2\2\u01c1\u01c6\5<\37\2"+ + "\u01c2\u01c3\7\r\2\2\u01c3\u01c5\5<\37\2\u01c4\u01c2\3\2\2\2\u01c5\u01c8"+ + "\3\2\2\2\u01c6\u01c4\3\2\2\2\u01c6\u01c7\3\2\2\2\u01c7\u01ca\3\2\2\2\u01c8"+ + "\u01c6\3\2\2\2\u01c9\u01c1\3\2\2\2\u01c9\u01ca\3\2\2\2\u01ca\u01cb\3\2"+ + "\2\2\u01cb\u01cc\7\n\2\2\u01cc;\3\2\2\2\u01cd\u01d1\5 \21\2\u01ce\u01d1"+ + "\5> \2\u01cf\u01d1\5B\"\2\u01d0\u01cd\3\2\2\2\u01d0\u01ce\3\2\2\2\u01d0"+ + "\u01cf\3\2\2\2\u01d1=\3\2\2\2\u01d2\u01e0\5@!\2\u01d3\u01dc\7\t\2\2\u01d4"+ + "\u01d9\5@!\2\u01d5\u01d6\7\r\2\2\u01d6\u01d8\5@!\2\u01d7\u01d5\3\2\2\2"+ + "\u01d8\u01db\3\2\2\2\u01d9\u01d7\3\2\2\2\u01d9\u01da\3\2\2\2\u01da\u01dd"+ + "\3\2\2\2\u01db\u01d9\3\2\2\2\u01dc\u01d4\3\2\2\2\u01dc\u01dd\3\2\2\2\u01dd"+ + "\u01de\3\2\2\2\u01de\u01e0\7\n\2\2\u01df\u01d2\3\2\2\2\u01df\u01d3\3\2"+ + "\2\2\u01e0\u01e1\3\2\2\2\u01e1\u01e4\79\2\2\u01e2\u01e5\5\20\t\2\u01e3"+ + "\u01e5\5 \21\2\u01e4\u01e2\3\2\2\2\u01e4\u01e3\3\2\2\2\u01e5?\3\2\2\2"+ + "\u01e6\u01e8\5\32\16\2\u01e7\u01e6\3\2\2\2\u01e7\u01e8\3\2\2\2\u01e8\u01e9"+ + "\3\2\2\2\u01e9\u01ea\7T\2\2\u01eaA\3\2\2\2\u01eb\u01ec\7S\2\2\u01ec\u01ed"+ + "\78\2\2\u01ed\u01f9\7T\2\2\u01ee\u01ef\5\32\16\2\u01ef\u01f0\78\2\2\u01f0"+ + "\u01f1\7\30\2\2\u01f1\u01f9\3\2\2\2\u01f2\u01f3\7T\2\2\u01f3\u01f4\78"+ + "\2\2\u01f4\u01f9\7T\2\2\u01f5\u01f6\7\34\2\2\u01f6\u01f7\78\2\2\u01f7"+ + "\u01f9\7T\2\2\u01f8\u01eb\3\2\2\2\u01f8\u01ee\3\2\2\2\u01f8\u01f2\3\2"+ + "\2\2\u01f8\u01f5\3\2\2\2\u01f9C\3\2\2\2\64GM`cku}\u0082\u0086\u008a\u008f"+ + "\u00a7\u00a9\u00ba\u00be\u00c4\u00c8\u00d0\u00da\u00e2\u00e8\u0124\u0126"+ + "\u0136\u013c\u0144\u0148\u015c\u0161\u0165\u017a\u0180\u0183\u018f\u0192"+ + "\u0198\u019b\u01a3\u01aa\u01b2\u01ba\u01c6\u01c9\u01d0\u01d9\u01dc\u01df"+ + "\u01e4\u01e7\u01f8"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 6c8d3a62e065b..dc5c164244d96 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -261,10 +261,6 @@ public ANode visitSource(SourceContext ctx) { statements.add((AStatement)visit(statement)); } - if (ctx.dstatement() != null) { - statements.add((AStatement)visit(ctx.dstatement())); - } - return new SSource(scriptClassInfo, settings, sourceName, debugStream, (MainMethodReserved)reserved.pop(), 
location(ctx), functions, globals, statements); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessBinding.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessBinding.java new file mode 100644 index 0000000000000..41178dd5d7506 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessBinding.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.lookup; + +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.util.List; + +public class PainlessBinding { + + public final Constructor javaConstructor; + public final Method javaMethod; + + public final Class returnType; + public final List> typeParameters; + + PainlessBinding(Constructor javaConstructor, Method javaMethod, Class returnType, List> typeParameters) { + this.javaConstructor = javaConstructor; + this.javaMethod = javaMethod; + + this.returnType = returnType; + this.typeParameters = typeParameters; + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java index 50bb79dcfbdf5..f5d6c97bb2f3e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java @@ -24,6 +24,7 @@ import java.util.Map; public final class PainlessClass { + public final Map constructors; public final Map staticMethods; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java index a61215e9ed749..92100d1bda0c0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java @@ -24,6 +24,7 @@ import java.util.Map; final class PainlessClassBuilder { + final Map constructors; final Map staticMethods; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java index 76597c1a29d65..a3dc6c8122bd6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java @@ -25,6 +25,7 @@ import java.util.List; public class PainlessConstructor { + public final Constructor javaConstructor; public 
final List> typeParameters; public final MethodHandle methodHandle; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java index a55d6c3730ebd..9567e97331c7a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java @@ -23,6 +23,7 @@ import java.lang.reflect.Field; public final class PainlessField { + public final Field javaField; public final Class typeParameter; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java index 16b8ac14f14f2..2d6ed3e361dc3 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java @@ -26,6 +26,7 @@ import java.util.Set; import java.util.function.Function; +import static org.elasticsearch.painless.lookup.PainlessLookupUtility.DEF_CLASS_NAME; import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessConstructorKey; import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessFieldKey; import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessMethodKey; @@ -36,18 +37,23 @@ public final class PainlessLookup { private final Map> canonicalClassNamesToClasses; private final Map, PainlessClass> classesToPainlessClasses; - PainlessLookup(Map> canonicalClassNamesToClasses, Map, PainlessClass> classesToPainlessClasses) { + private final Map painlessMethodKeysToPainlessBindings; + + PainlessLookup(Map> canonicalClassNamesToClasses, Map, PainlessClass> classesToPainlessClasses, + Map painlessMethodKeysToPainlessBindings) { Objects.requireNonNull(canonicalClassNamesToClasses); Objects.requireNonNull(classesToPainlessClasses); this.canonicalClassNamesToClasses = Collections.unmodifiableMap(canonicalClassNamesToClasses); this.classesToPainlessClasses = Collections.unmodifiableMap(classesToPainlessClasses); + + this.painlessMethodKeysToPainlessBindings = Collections.unmodifiableMap(painlessMethodKeysToPainlessBindings); } public boolean isValidCanonicalClassName(String canonicalClassName) { Objects.requireNonNull(canonicalClassName); - return canonicalClassNamesToClasses.containsKey(canonicalClassName); + return DEF_CLASS_NAME.equals(canonicalClassName) || canonicalClassNamesToClasses.containsKey(canonicalClassName); } public Class canonicalTypeNameToType(String canonicalTypeName) { @@ -161,6 +167,14 @@ public PainlessField lookupPainlessField(Class targetClass, boolean isStatic, return painlessField; } + public PainlessBinding lookupPainlessBinding(String methodName, int arity) { + Objects.requireNonNull(methodName); + + String painlessMethodKey = buildPainlessMethodKey(methodName, arity); + + return painlessMethodKeysToPainlessBindings.get(painlessMethodKey); + } + public PainlessMethod lookupFunctionalInterfacePainlessMethod(Class targetClass) { PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetClass); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index 
e644453a4c1ba..a64814f866113 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -20,6 +20,7 @@ package org.elasticsearch.painless.lookup; import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistBinding; import org.elasticsearch.painless.spi.WhitelistClass; import org.elasticsearch.painless.spi.WhitelistConstructor; import org.elasticsearch.painless.spi.WhitelistField; @@ -52,11 +53,11 @@ public final class PainlessLookupBuilder { private static class PainlessConstructorCacheKey { - private final Class targetType; + private final Class targetClass; private final List> typeParameters; - private PainlessConstructorCacheKey(Class targetType, List> typeParameters) { - this.targetType = targetType; + private PainlessConstructorCacheKey(Class targetClass, List> typeParameters) { + this.targetClass = targetClass; this.typeParameters = Collections.unmodifiableList(typeParameters); } @@ -72,25 +73,27 @@ public boolean equals(Object object) { PainlessConstructorCacheKey that = (PainlessConstructorCacheKey)object; - return Objects.equals(targetType, that.targetType) && + return Objects.equals(targetClass, that.targetClass) && Objects.equals(typeParameters, that.typeParameters); } @Override public int hashCode() { - return Objects.hash(targetType, typeParameters); + return Objects.hash(targetClass, typeParameters); } } private static class PainlessMethodCacheKey { - private final Class targetType; + private final Class targetClass; private final String methodName; + private final Class returnType; private final List> typeParameters; - private PainlessMethodCacheKey(Class targetType, String methodName, List> typeParameters) { - this.targetType = targetType; + private PainlessMethodCacheKey(Class targetClass, String methodName, Class returnType, List> typeParameters) { + this.targetClass = targetClass; this.methodName = methodName; + this.returnType = returnType; this.typeParameters = Collections.unmodifiableList(typeParameters); } @@ -106,25 +109,26 @@ public boolean equals(Object object) { PainlessMethodCacheKey that = (PainlessMethodCacheKey)object; - return Objects.equals(targetType, that.targetType) && + return Objects.equals(targetClass, that.targetClass) && Objects.equals(methodName, that.methodName) && + Objects.equals(returnType, that.returnType) && Objects.equals(typeParameters, that.typeParameters); } @Override public int hashCode() { - return Objects.hash(targetType, methodName, typeParameters); + return Objects.hash(targetClass, methodName, returnType, typeParameters); } } private static class PainlessFieldCacheKey { - private final Class targetType; + private final Class targetClass; private final String fieldName; private final Class typeParameter; - private PainlessFieldCacheKey(Class targetType, String fieldName, Class typeParameter) { - this.targetType = targetType; + private PainlessFieldCacheKey(Class targetClass, String fieldName, Class typeParameter) { + this.targetClass = targetClass; this.fieldName = fieldName; this.typeParameter = typeParameter; } @@ -141,20 +145,61 @@ public boolean equals(Object object) { PainlessFieldCacheKey that = (PainlessFieldCacheKey) object; - return Objects.equals(targetType, that.targetType) && + return Objects.equals(targetClass, that.targetClass) && Objects.equals(fieldName, that.fieldName) && Objects.equals(typeParameter, that.typeParameter); } 
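The binding support introduced in this builder registers each whitelisted binding under the same name-plus-arity method key used for ordinary Painless method lookups, where the arity is the constructor parameter count plus the method parameter count. The sketch below only illustrates that keying scheme; the exact "name/arity" string format, the map shape, and the class names used here are assumptions for illustration, since the real key comes from PainlessLookupUtility.buildPainlessMethodKey and the real storage is painlessMethodKeysToPainlessBindings.

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative sketch of the binding keying scheme; the "name/arity" format is assumed.
    final class BindingKeySketch {

        static String painlessMethodKey(String methodName, int arity) {
            return methodName + "/" + arity;
        }

        public static void main(String[] args) {
            Map<String, String> methodKeysToBindings = new HashMap<>();

            // testAddWithState binds 2 constructor arguments plus 2 method arguments, so arity is 4.
            int constructorArity = 2;
            int methodArity = 2;
            methodKeysToBindings.put(
                    painlessMethodKey("testAddWithState", constructorArity + methodArity),
                    "org.elasticsearch.painless.BindingTest");

            // ECallLocal.analyze falls back to a lookup like this when no user-defined function matches.
            System.out.println(methodKeysToBindings.get(painlessMethodKey("testAddWithState", 4)));
        }
    }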
@Override public int hashCode() { - return Objects.hash(targetType, fieldName, typeParameter); + return Objects.hash(targetClass, fieldName, typeParameter); } } - private static final Map painlessConstuctorCache = new HashMap<>(); - private static final Map painlessMethodCache = new HashMap<>(); - private static final Map painlessFieldCache = new HashMap<>(); + private static class PainlessBindingCacheKey { + + private final Class targetClass; + private final String methodName; + private final Class methodReturnType; + private final List> methodTypeParameters; + + private PainlessBindingCacheKey(Class targetClass, + String methodName, Class returnType, List> typeParameters) { + + this.targetClass = targetClass; + this.methodName = methodName; + this.methodReturnType = returnType; + this.methodTypeParameters = Collections.unmodifiableList(typeParameters); + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + + if (object == null || getClass() != object.getClass()) { + return false; + } + + PainlessBindingCacheKey that = (PainlessBindingCacheKey)object; + + return Objects.equals(targetClass, that.targetClass) && + Objects.equals(methodName, that.methodName) && + Objects.equals(methodReturnType, that.methodReturnType) && + Objects.equals(methodTypeParameters, that.methodTypeParameters); + } + + @Override + public int hashCode() { + return Objects.hash(targetClass, methodName, methodReturnType, methodTypeParameters); + } + } + + private static final Map painlessConstructorCache = new HashMap<>(); + private static final Map painlessMethodCache = new HashMap<>(); + private static final Map painlessFieldCache = new HashMap<>(); + private static final Map painlessBindingCache = new HashMap<>(); private static final Pattern CLASS_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$"); private static final Pattern METHOD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$"); @@ -197,6 +242,14 @@ public static PainlessLookup buildFromWhitelists(List whitelists) { targetCanonicalClassName, whitelistField.fieldName, whitelistField.canonicalTypeNameParameter); } } + + for (WhitelistBinding whitelistBinding : whitelist.whitelistBindings) { + origin = whitelistBinding.origin; + painlessLookupBuilder.addPainlessBinding( + whitelist.classLoader, whitelistBinding.targetJavaClassName, + whitelistBinding.methodName, whitelistBinding.returnCanonicalTypeName, + whitelistBinding.canonicalTypeNameParameters); + } } } catch (Exception exception) { throw new IllegalArgumentException("error loading whitelist(s) " + origin, exception); @@ -208,12 +261,13 @@ public static PainlessLookup buildFromWhitelists(List whitelists) { private final Map> canonicalClassNamesToClasses; private final Map, PainlessClassBuilder> classesToPainlessClassBuilders; + private final Map painlessMethodKeysToPainlessBindings; + public PainlessLookupBuilder() { canonicalClassNamesToClasses = new HashMap<>(); classesToPainlessClassBuilders = new HashMap<>(); - canonicalClassNamesToClasses.put(DEF_CLASS_NAME, def.class); - classesToPainlessClassBuilders.put(def.class, new PainlessClassBuilder()); + painlessMethodKeysToPainlessBindings = new HashMap<>(); } private Class canonicalTypeNameToType(String canonicalTypeName) { @@ -225,7 +279,7 @@ private boolean isValidType(Class type) { type = type.getComponentType(); } - return classesToPainlessClassBuilders.containsKey(type); + return type == def.class || classesToPainlessClassBuilders.containsKey(type); } public void 
addPainlessClass(ClassLoader classLoader, String javaClassName, boolean importClassName) { @@ -395,7 +449,7 @@ public void addPainlessConstructor(Class targetClass, List> typePara MethodType methodType = methodHandle.type(); - painlessConstructor = painlessConstuctorCache.computeIfAbsent( + painlessConstructor = painlessConstructorCache.computeIfAbsent( new PainlessConstructorCacheKey(targetClass, typeParameters), key -> new PainlessConstructor(javaConstructor, typeParameters, methodHandle, methodType) ); @@ -442,7 +496,7 @@ public void addPainlessMethod(ClassLoader classLoader, String targetCanonicalCla Class typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); if (typeParameter == null) { - throw new IllegalArgumentException("parameter type [" + canonicalTypeNameParameter + "] not found for method " + + throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for method " + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); } @@ -452,7 +506,7 @@ public void addPainlessMethod(ClassLoader classLoader, String targetCanonicalCla Class returnType = canonicalTypeNameToType(returnCanonicalTypeName); if (returnType == null) { - throw new IllegalArgumentException("parameter type [" + returnCanonicalTypeName + "] not found for method " + + throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for method " + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); } @@ -551,7 +605,7 @@ public void addPainlessMethod(Class targetClass, Class augmentedClass, Str MethodType methodType = methodHandle.type(); painlessMethod = painlessMethodCache.computeIfAbsent( - new PainlessMethodCacheKey(targetClass, methodName, typeParameters), + new PainlessMethodCacheKey(targetClass, methodName, returnType, typeParameters), key -> new PainlessMethod(javaMethod, targetClass, returnType, typeParameters, methodHandle, methodType)); painlessClassBuilder.staticMethods.put(painlessMethodKey, painlessMethod); @@ -591,7 +645,7 @@ public void addPainlessMethod(Class targetClass, Class augmentedClass, Str MethodType methodType = methodHandle.type(); painlessMethod = painlessMethodCache.computeIfAbsent( - new PainlessMethodCacheKey(targetClass, methodName, typeParameters), + new PainlessMethodCacheKey(targetClass, methodName, returnType, typeParameters), key -> new PainlessMethod(javaMethod, targetClass, returnType, typeParameters, methodHandle, methodType)); painlessClassBuilder.methods.put(painlessMethodKey, painlessMethod); @@ -734,6 +788,183 @@ public void addPainlessField(Class targetClass, String fieldName, Class ty } } + public void addPainlessBinding(ClassLoader classLoader, String targetJavaClassName, + String methodName, String returnCanonicalTypeName, List canonicalTypeNameParameters) { + + Objects.requireNonNull(classLoader); + Objects.requireNonNull(targetJavaClassName); + Objects.requireNonNull(methodName); + Objects.requireNonNull(returnCanonicalTypeName); + Objects.requireNonNull(canonicalTypeNameParameters); + + Class targetClass; + + try { + targetClass = Class.forName(targetJavaClassName, true, classLoader); + } catch (ClassNotFoundException cnfe) { + throw new IllegalArgumentException("class [" + targetJavaClassName + "] not found", cnfe); + } + + String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); + List> typeParameters = new ArrayList<>(canonicalTypeNameParameters.size()); + + for (String 
canonicalTypeNameParameter : canonicalTypeNameParameters) { + Class typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); + + if (typeParameter == null) { + throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for binding " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); + } + + typeParameters.add(typeParameter); + } + + Class returnType = canonicalTypeNameToType(returnCanonicalTypeName); + + if (returnType == null) { + throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for binding " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); + } + + addPainlessBinding(targetClass, methodName, returnType, typeParameters); + } + + public void addPainlessBinding(Class targetClass, String methodName, Class returnType, List> typeParameters) { + + Objects.requireNonNull(targetClass); + Objects.requireNonNull(methodName); + Objects.requireNonNull(returnType); + Objects.requireNonNull(typeParameters); + + if (targetClass == def.class) { + throw new IllegalArgumentException("cannot add binding as reserved class [" + DEF_CLASS_NAME + "]"); + } + + String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); + + Constructor[] javaConstructors = targetClass.getConstructors(); + Constructor javaConstructor = null; + + for (Constructor eachJavaConstructor : javaConstructors) { + if (eachJavaConstructor.getDeclaringClass() == targetClass) { + if (javaConstructor != null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] cannot have multiple constructors"); + } + + javaConstructor = eachJavaConstructor; + } + } + + if (javaConstructor == null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] must have exactly one constructor"); + } + + int constructorTypeParametersSize = javaConstructor.getParameterCount(); + + for (int typeParameterIndex = 0; typeParameterIndex < constructorTypeParametersSize; ++typeParameterIndex) { + Class typeParameter = typeParameters.get(typeParameterIndex); + + if (isValidType(typeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + Class javaTypeParameter = javaConstructor.getParameterTypes()[typeParameterIndex]; + + if (isValidType(javaTypeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + if (javaTypeParameter != typeToJavaType(typeParameter)) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(javaTypeParameter) + "] " + + "does not match the specified type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " + + "for binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + } + + if (METHOD_NAME_PATTERN.matcher(methodName).matches() == false) { + throw new IllegalArgumentException( + "invalid method name [" + methodName + "] for binding [" + targetCanonicalClassName + "]."); + } + + Method[] javaMethods = targetClass.getMethods(); + Method javaMethod = null; + + for (Method eachJavaMethod : javaMethods) { + if 
(eachJavaMethod.getDeclaringClass() == targetClass) { + if (javaMethod != null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] cannot have multiple methods"); + } + + javaMethod = eachJavaMethod; + } + } + + if (javaMethod == null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] must have exactly one method"); + } + + int methodTypeParametersSize = javaMethod.getParameterCount(); + + for (int typeParameterIndex = 0; typeParameterIndex < methodTypeParametersSize; ++typeParameterIndex) { + Class typeParameter = typeParameters.get(constructorTypeParametersSize + typeParameterIndex); + + if (isValidType(typeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + Class javaTypeParameter = javaMethod.getParameterTypes()[typeParameterIndex]; + + if (isValidType(javaTypeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + if (javaTypeParameter != typeToJavaType(typeParameter)) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(javaTypeParameter) + "] " + + "does not match the specified type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " + + "for binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + } + + if (javaMethod.getReturnType() != typeToJavaType(returnType)) { + throw new IllegalArgumentException("return type [" + typeToCanonicalTypeName(javaMethod.getReturnType()) + "] " + + "does not match the specified returned type [" + typeToCanonicalTypeName(returnType) + "] " + + "for binding [[" + targetClass.getCanonicalName() + "], [" + methodName + "], " + + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + String painlessMethodKey = buildPainlessMethodKey(methodName, constructorTypeParametersSize + methodTypeParametersSize); + PainlessBinding painlessBinding = painlessMethodKeysToPainlessBindings.get(painlessMethodKey); + + if (painlessBinding == null) { + Constructor finalJavaConstructor = javaConstructor; + Method finalJavaMethod = javaMethod; + + painlessBinding = painlessBindingCache.computeIfAbsent( + new PainlessBindingCacheKey(targetClass, methodName, returnType, typeParameters), + key -> new PainlessBinding(finalJavaConstructor, finalJavaMethod, returnType, typeParameters)); + + painlessMethodKeysToPainlessBindings.put(painlessMethodKey, painlessBinding); + } else if (painlessBinding.javaConstructor.equals(javaConstructor) == false || + painlessBinding.javaMethod.equals(javaMethod) == false || + painlessBinding.returnType != returnType || + painlessBinding.typeParameters.equals(typeParameters) == false) { + throw new IllegalArgumentException("cannot have bindings " + + "[[" + targetCanonicalClassName + "], " + + "[" + methodName + "], " + + "[" + typeToCanonicalTypeName(returnType) + "], " + + typesToCanonicalTypeNames(typeParameters) + "] and " + + "[[" + targetCanonicalClassName + "], " + + "[" + methodName + "], " + + "[" + typeToCanonicalTypeName(painlessBinding.returnType) + "], " + + typesToCanonicalTypeNames(painlessBinding.typeParameters) + "] and " + + "with the same name and arity but different constructors 
or methods"); + } + } + public PainlessLookup build() { copyPainlessClassMembers(); cacheRuntimeHandles(); @@ -745,7 +976,7 @@ public PainlessLookup build() { classesToPainlessClasses.put(painlessClassBuilderEntry.getKey(), painlessClassBuilderEntry.getValue().build()); } - return new PainlessLookup(canonicalClassNamesToClasses, classesToPainlessClasses); + return new PainlessLookup(canonicalClassNamesToClasses, classesToPainlessClasses, painlessMethodKeysToPainlessBindings); } private void copyPainlessClassMembers() { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupUtility.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupUtility.java index f2eb434516961..71cacab9eba9d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupUtility.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupUtility.java @@ -82,7 +82,7 @@ public static Class canonicalTypeNameToType(String canonicalTypeName, Map type = canonicalClassNamesToClasses.get(canonicalTypeName); + Class type = DEF_CLASS_NAME.equals(canonicalTypeName) ? def.class : canonicalClassNamesToClasses.get(canonicalTypeName); if (type != null) { return type; @@ -105,7 +105,7 @@ public static Class canonicalTypeNameToType(String canonicalTypeName, Map targetClass; public final Class returnType; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java index 1f9973df19224..8ae6ad9723da4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java @@ -24,8 +24,12 @@ import org.elasticsearch.painless.Locals.LocalMethod; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.lookup.PainlessBinding; +import org.objectweb.asm.Label; +import org.objectweb.asm.Type; import org.objectweb.asm.commons.Method; +import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.Set; @@ -41,6 +45,7 @@ public final class ECallLocal extends AExpression { private final List arguments; private LocalMethod method = null; + private PainlessBinding binding = null; public ECallLocal(Location location, String name, List arguments) { super(location); @@ -60,32 +65,71 @@ void extractVariables(Set variables) { void analyze(Locals locals) { method = locals.getMethod(name, arguments.size()); + if (method == null) { - throw createError(new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments.")); + binding = locals.getPainlessLookup().lookupPainlessBinding(name, arguments.size()); + + if (binding == null) { + throw createError(new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments.")); + } } + List> typeParameters = new ArrayList<>(method == null ? 
binding.typeParameters : method.typeParameters); + for (int argument = 0; argument < arguments.size(); ++argument) { AExpression expression = arguments.get(argument); - expression.expected = method.typeParameters.get(argument); + expression.expected = typeParameters.get(argument); expression.internal = true; expression.analyze(locals); arguments.set(argument, expression.cast(locals)); } statement = true; - actual = method.returnType; + actual = method == null ? binding.returnType : method.returnType; } @Override void write(MethodWriter writer, Globals globals) { writer.writeDebugInfo(location); - for (AExpression argument : arguments) { - argument.write(writer, globals); - } + if (method == null) { + String name = globals.addBinding(binding.javaConstructor.getDeclaringClass()); + Type type = Type.getType(binding.javaConstructor.getDeclaringClass()); + int javaConstructorParameterCount = binding.javaConstructor.getParameterCount(); + + Label nonNull = new Label(); - writer.invokeStatic(CLASS_TYPE, new Method(method.name, method.methodType.toMethodDescriptorString())); + writer.loadThis(); + writer.getField(CLASS_TYPE, name, type); + writer.ifNonNull(nonNull); + writer.loadThis(); + writer.newInstance(type); + writer.dup(); + + for (int argument = 0; argument < javaConstructorParameterCount; ++argument) { + arguments.get(argument).write(writer, globals); + } + + writer.invokeConstructor(type, Method.getMethod(binding.javaConstructor)); + writer.putField(CLASS_TYPE, name, type); + + writer.mark(nonNull); + writer.loadThis(); + writer.getField(CLASS_TYPE, name, type); + + for (int argument = 0; argument < binding.javaMethod.getParameterCount(); ++argument) { + arguments.get(argument + javaConstructorParameterCount).write(writer, globals); + } + + writer.invokeVirtual(type, Method.getMethod(binding.javaMethod)); + } else { + for (AExpression argument : arguments) { + argument.write(writer, globals); + } + + writer.invokeStatic(CLASS_TYPE, new Method(method.name, method.methodType.toMethodDescriptorString())); + } } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java index 0f7445a38c44c..8abd3c7185d87 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java @@ -359,6 +359,13 @@ public void write() { clinit.endMethod(); } + // Write binding variables + for (Map.Entry> binding : globals.getBindings().entrySet()) { + String name = binding.getKey(); + String descriptor = Type.getType(binding.getValue()).getDescriptor(); + visitor.visitField(Opcodes.ACC_PRIVATE, name, descriptor, null, null).visitEnd(); + } + // Write any needsVarName methods for used variables for (org.objectweb.asm.commons.Method needsMethod : scriptClassInfo.getNeedsMethods()) { String name = needsMethod.getName(); diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt index a3ff479533bd2..444234384c6d3 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt @@ -132,24 +132,6 @@ class org.elasticsearch.index.mapper.IpFieldMapper$IpFieldType$IpScriptDocValues List getValues() } -# for 
testing. -# currently FeatureTest exposes overloaded constructor, field load store, and overloaded static methods -class org.elasticsearch.painless.FeatureTest no_import { - int z - () - (int,int) - int getX() - int getY() - void setX(int) - void setY(int) - boolean overloadedStatic() - boolean overloadedStatic(boolean) - Object twoFunctionsOfX(Function,Function) - void listInput(List) - int org.elasticsearch.painless.FeatureTestAugmentation getTotal() - int org.elasticsearch.painless.FeatureTestAugmentation addToTotal(int) -} - class org.elasticsearch.search.lookup.FieldLookup { def getValue() List getValues() @@ -174,4 +156,26 @@ class org.elasticsearch.index.similarity.ScriptedSimilarity$Term { class org.elasticsearch.index.similarity.ScriptedSimilarity$Doc { int getLength() float getFreq() +} + +# for testing +class org.elasticsearch.painless.FeatureTest no_import { + int z + () + (int,int) + int getX() + int getY() + void setX(int) + void setY(int) + boolean overloadedStatic() + boolean overloadedStatic(boolean) + Object twoFunctionsOfX(Function,Function) + void listInput(List) + int org.elasticsearch.painless.FeatureTestAugmentation getTotal() + int org.elasticsearch.painless.FeatureTestAugmentation addToTotal(int) +} + +# for testing +static { + int testAddWithState(int, int, int, double) bound_to org.elasticsearch.painless.BindingTest } \ No newline at end of file diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicAPITests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicAPITests.java index 0b13694524b0c..25866c8d668a3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicAPITests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicAPITests.java @@ -129,4 +129,8 @@ public void testPublicMemberAccess() { assertEquals(5, exec("org.elasticsearch.painless.FeatureTest ft = new org.elasticsearch.painless.FeatureTest();" + "ft.z = 5; return ft.z;")); } + + public void testNoSemicolon() { + assertEquals(true, exec("def x = true; if (x) return x")); + } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java new file mode 100644 index 0000000000000..4bcc557d3dcff --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
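The `bound_to` whitelist entry above implies a target class with exactly one constructor and one method, where the leading call arguments feed the constructor once per script instance and the remaining arguments feed the method on every run. A minimal sketch consistent with that signature and with the expectations in BindingsTests below is shown here; the split of two constructor arguments plus two method arguments, and the field and parameter names, are assumptions rather than a copy of the actual org.elasticsearch.painless.BindingTest added by this change.

    package org.elasticsearch.painless;

    // Sketch consistent with BindingsTests: testAddWithState(4, 5, 6, 0.0) evaluates to 15,
    // and only the first run of a script instance invokes the constructor.
    public class BindingTest {

        public final int state;

        public BindingTest(int state0, int state1) {
            // Bound once, from the first two call arguments on the first run.
            this.state = state0 + state1;
        }

        public int testAddWithState(int stateless, double unused) {
            // Evaluated on every run, from the remaining two call arguments.
            return state + stateless;
        }
    }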
+ */ + +package org.elasticsearch.painless; + +import org.elasticsearch.script.ExecutableScript; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class BindingsTests extends ScriptTestCase { + + public void testBasicBinding() { + assertEquals(15, exec("testAddWithState(4, 5, 6, 0.0)")); + } + + public void testRepeatedBinding() { + String script = "testAddWithState(4, 5, params.test, 0.0)"; + Map params = new HashMap<>(); + ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, Collections.emptyMap()); + ExecutableScript executableScript = factory.newInstance(params); + + executableScript.setNextVar("test", 5); + assertEquals(14, executableScript.run()); + + executableScript.setNextVar("test", 4); + assertEquals(13, executableScript.run()); + + executableScript.setNextVar("test", 7); + assertEquals(16, executableScript.run()); + } + + public void testBoundBinding() { + String script = "testAddWithState(4, params.bound, params.test, 0.0)"; + Map params = new HashMap<>(); + ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, Collections.emptyMap()); + ExecutableScript executableScript = factory.newInstance(params); + + executableScript.setNextVar("test", 5); + executableScript.setNextVar("bound", 1); + assertEquals(10, executableScript.run()); + + executableScript.setNextVar("test", 4); + executableScript.setNextVar("bound", 2); + assertEquals(9, executableScript.run()); + } +} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/InitializerTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/InitializerTests.java index d0d0b2165ca10..be3db76bac250 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/InitializerTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/InitializerTests.java @@ -53,7 +53,7 @@ public void testArrayInitializers() { "Object[] x = new Object[] {y, z, 1 + s, s + 'aaa'}; return x;"); assertEquals(4, objects.length); - assertEquals(new Integer(2), objects[0]); + assertEquals(Integer.valueOf(2), objects[0]); assertEquals(new ArrayList(), objects[1]); assertEquals("1aaa", objects[2]); assertEquals("aaaaaa", objects[3]); @@ -85,7 +85,7 @@ public void testListInitializers() { list = (List)exec("int y = 2; List z = new ArrayList(); String s = 'aaa'; List x = [y, z, 1 + s, s + 'aaa']; return x;"); assertEquals(4, list.size()); - assertEquals(new Integer(2), list.get(0)); + assertEquals(Integer.valueOf(2), list.get(0)); assertEquals(new ArrayList(), list.get(1)); assertEquals("1aaa", list.get(2)); assertEquals("aaaaaa", list.get(3)); @@ -100,15 +100,15 @@ public void testMapInitializers() { map = (Map)exec("[5 : 7, -1 : 14]"); assertEquals(2, map.size()); - assertEquals(new Integer(7), map.get(5)); - assertEquals(new Integer(14), map.get(-1)); + assertEquals(Integer.valueOf(7), map.get(5)); + assertEquals(Integer.valueOf(14), map.get(-1)); map = (Map)exec("int y = 2; int z = 3; Map x = [y*z : y + z, y - z : y, z : z]; return x;"); assertEquals(3, map.size()); - assertEquals(new Integer(5), map.get(6)); - assertEquals(new Integer(2), map.get(-1)); - assertEquals(new Integer(3), map.get(3)); + assertEquals(Integer.valueOf(5), map.get(6)); + assertEquals(Integer.valueOf(2), map.get(-1)); + assertEquals(Integer.valueOf(3), map.get(3)); map = (Map)exec("int y = 2; List z = new ArrayList(); String s = 'aaa';" + "def x = [y : z, 1 + s : s + 'aaa']; return x;"); @@ 
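For calls that resolve to a binding, SSource.write declares one private field per binding and ECallLocal.write wraps it in a lazy-initialization sequence; the sketch below shows roughly the Java shape of that emitted bytecode. The field name is a placeholder for whatever Globals.addBinding returns, and BindingTest refers to the sketch given earlier, so treat the class and member names as assumptions.

    package org.elasticsearch.painless;

    // Rough Java equivalent of the bytecode emitted for a script call that resolves to a binding.
    public final class GeneratedScriptSketch {

        private BindingTest binding$0; // one field per binding in use, declared by SSource.write

        public int execute(int a, int b, int c, double d) {
            if (binding$0 == null) {
                // First call only: the leading arguments feed the binding's constructor.
                binding$0 = new BindingTest(a, b);
            }
            // Every call: the remaining arguments feed the bound method.
            return binding$0.testAddWithState(c, d);
        }
    }

Because the field is constructed only while it is still null, testBoundBinding above behaves as it does: the first run captures 4 and params.bound (1) in the constructor, and later runs reuse that state, so only params.test affects the result.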
-139,7 +139,7 @@ public void testCrazyInitializer() { list3.add(9); assertEquals(3, map.size()); - assertEquals(new Integer(5), map.get(6)); + assertEquals(Integer.valueOf(5), map.get(6)); assertEquals(list2, map.get("s")); assertEquals(list3, map.get(3)); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java index 8143c39ce6f6b..81c139662e7c8 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java @@ -278,6 +278,6 @@ public void testBogusRegexFlag() { IllegalArgumentException e = expectScriptThrows(IllegalArgumentException.class, () -> { exec("/asdf/b", false); // Not picky so we get a non-assertion error }); - assertEquals("invalid sequence of tokens near ['b'].", e.getMessage()); + assertEquals("unexpected token ['b'] was expecting one of [{, ';'}].", e.getMessage()); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java index f2d93aa759d07..79d2fe0c53deb 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java @@ -255,7 +255,7 @@ public void testRCurlyNotDelim() { // We don't want PICKY here so we get the normal error message exec("def i = 1} return 1", emptyMap(), emptyMap(), null, false); }); - assertEquals("invalid sequence of tokens near ['}'].", e.getMessage()); + assertEquals("unexpected token ['}'] was expecting one of [{, ';'}].", e.getMessage()); } public void testBadBoxingCast() { diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml index 4c3c204d2d9ca..617b8df61b6bd 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -95,7 +95,7 @@ setup: field: script: source: "doc.date.get(0)" - - match: { hits.hits.0.fields.field.0: '2017-01-01T12:11:12Z' } + - match: { hits.hits.0.fields.field.0: '2017-01-01T12:11:12.000Z' } - do: search: @@ -104,7 +104,7 @@ setup: field: script: source: "doc.date.value" - - match: { hits.hits.0.fields.field.0: '2017-01-01T12:11:12Z' } + - match: { hits.hits.0.fields.field.0: '2017-01-01T12:11:12.000Z' } --- "geo_point": diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java index 3381356da4171..e37a796009137 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.join.JoinUtil; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -125,15 +124,7 @@ protected void 
doWriteTo(StreamOutput out) throws IOException { out.writeInt(maxChildren); out.writeVInt(scoreMode.ordinal()); out.writeNamedWriteable(query); - if (out.getVersion().before(Version.V_5_5_0)) { - final boolean hasInnerHit = innerHitBuilder != null; - out.writeBoolean(hasInnerHit); - if (hasInnerHit) { - innerHitBuilder.writeToParentChildBWC(out, query, type); - } - } else { - out.writeOptionalWriteable(innerHitBuilder); - } + out.writeOptionalWriteable(innerHitBuilder); out.writeBoolean(ignoreUnmapped); } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java index 4e328ea2c984e..e98fdb9e9699d 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java @@ -21,7 +21,6 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.ScoreMode; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -97,15 +96,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(type); out.writeBoolean(score); out.writeNamedWriteable(query); - if (out.getVersion().before(Version.V_5_5_0)) { - final boolean hasInnerHit = innerHitBuilder != null; - out.writeBoolean(hasInnerHit); - if (hasInnerHit) { - innerHitBuilder.writeToParentChildBWC(out, query, type); - } - } else { - out.writeOptionalWriteable(innerHitBuilder); - } + out.writeOptionalWriteable(innerHitBuilder); out.writeBoolean(ignoreUnmapped); } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java index 6593c7efb9fab..5e57a2774055d 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java @@ -131,7 +131,7 @@ public TopDocs[] topDocs(SearchHit[] hits) throws IOException { for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx); } - result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0); + result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, Float.NaN); } else { int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); TopDocsCollector topDocsCollector; diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 546677a2be4f4..6e4e79d16e5a5 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -196,10 +196,6 @@ protected void doAssertLuceneQuery(HasChildQueryBuilder queryBuilder, Query quer public void testSerializationBWC() throws IOException { for (Version version : VersionUtils.allReleasedVersions()) { HasChildQueryBuilder testQuery = 
createTestQueryBuilder(); - if (version.before(Version.V_5_2_0) && testQuery.innerHit() != null) { - // ignore unmapped for inner_hits has been added on 5.2 - testQuery.innerHit().setIgnoreUnmapped(false); - } assertSerialization(testQuery, version); } } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java index 6d6822007eee3..164405f653444 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java @@ -171,10 +171,6 @@ protected void doAssertLuceneQuery(HasParentQueryBuilder queryBuilder, Query que public void testSerializationBWC() throws IOException { for (Version version : VersionUtils.allReleasedVersions()) { HasParentQueryBuilder testQuery = createTestQueryBuilder(); - if (version.before(Version.V_5_2_0) && testQuery.innerHit() != null) { - // ignore unmapped for inner_hits has been added on 5.2 - testQuery.innerHit().setIgnoreUnmapped(false); - } assertSerialization(testQuery, version); } } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java index 87b16bc448ef1..5ea0d8312ad04 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.join.ParentJoinPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -58,6 +59,8 @@ protected Collection> transportClientPlugins() { @Override public Settings indexSettings() { Settings.Builder builder = Settings.builder().put(super.indexSettings()) + // AwaitsFix: https://github.com/elastic/elasticsearch/issues/33318 + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) // aggressive filter caching so that we can assert on the filter cache size .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), true) .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index f18efe4585bc9..445076b8eba07 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -272,11 +272,7 @@ public PercolateQueryBuilder(String field, String documentType, String indexedDo documents = document != null ? 
Collections.singletonList(document) : Collections.emptyList(); } if (documents.isEmpty() == false) { - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - documentXContentType = in.readEnum(XContentType.class); - } else { - documentXContentType = XContentHelper.xContentType(documents.iterator().next()); - } + documentXContentType = in.readEnum(XContentType.class); } else { documentXContentType = null; } @@ -329,7 +325,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { BytesReference doc = documents.isEmpty() ? null : documents.iterator().next(); out.writeOptionalBytesReference(doc); } - if (documents.isEmpty() == false && out.getVersion().onOrAfter(Version.V_5_3_0)) { + if (documents.isEmpty() == false) { out.writeEnum(documentXContentType); } } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index e7163edef94c9..eb7af5f30d061 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; @@ -36,7 +35,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -57,7 +55,6 @@ import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Base64; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -294,26 +291,6 @@ public void testCreateMultiDocumentSearcher() throws Exception { assertThat(result.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.MUST_NOT)); } - public void testSerializationBwc() throws IOException { - final byte[] data = Base64.getDecoder().decode("P4AAAAAFZmllbGQEdHlwZQAAAAAAAA57ImZvbyI6ImJhciJ9AAAAAA=="); - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - PercolateQueryBuilder queryBuilder = new PercolateQueryBuilder(in); - assertEquals("type", queryBuilder.getDocumentType()); - assertEquals("field", queryBuilder.getField()); - assertEquals("{\"foo\":\"bar\"}", queryBuilder.getDocuments().iterator().next().utf8ToString()); - assertEquals(XContentType.JSON, queryBuilder.getXContentType()); - - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(version); - queryBuilder.writeTo(out); - assertArrayEquals(data, out.bytes().toBytesRef().bytes); - } - } - } - private static BytesReference randomSource(Set usedFields) { try { // If we create two source that have the same field, but these fields have different kind of values (str vs. 
lng) then diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java index 5e97eadae83e7..1c7ae3681ac63 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java @@ -74,7 +74,7 @@ public void testStoringQueryBuilders() throws IOException { BinaryFieldMapper fieldMapper = PercolatorFieldMapper.Builder.createQueryBuilderFieldBuilder( new Mapper.BuilderContext(settings, new ContentPath(0))); - Version version = randomBoolean() ? Version.V_5_6_0 : Version.V_6_0_0_beta2; + Version version = Version.V_6_0_0_beta2; try (IndexWriter indexWriter = new IndexWriter(directory, config)) { for (int i = 0; i < queryBuilders.length; i++) { queryBuilders[i] = new TermQueryBuilder(randomAlphaOfLength(4), randomAlphaOfLength(8)); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index a5520c90b0ff5..50d01535d7ff0 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -118,7 +117,7 @@ protected ReindexRequest buildRequest(RestRequest request) throws IOException { throw new IllegalArgumentException("_reindex doesn't support [pipeline] as a query parameter. " + "Specify it in the [dest] object instead."); } - ReindexRequest internal = new ReindexRequest(new SearchRequest(), new IndexRequest()); + ReindexRequest internal = new ReindexRequest(); try (XContentParser parser = request.contentParser()) { PARSER.parse(parser, internal, null); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java index bf0adc6e1429f..72a2a0d73356b 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -63,7 +62,7 @@ protected UpdateByQueryRequest buildRequest(RestRequest request) throws IOExcept * it to set its own defaults which differ from SearchRequest's * defaults. Then the parse can override them. 
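Here and in the reindex test classes further down, new ReindexRequest(new SearchRequest(), new IndexRequest()) and new UpdateByQueryRequest(new SearchRequest()) give way to no-argument constructors: the request object now builds and owns its inner search/index requests instead of having every REST handler and test wire them in. A purely illustrative Java sketch of that shape, assuming this is how the real constructors behave (they are not shown in this diff):

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;

public class ReindexRequestSketch {
    private final SearchRequest searchRequest;
    private final IndexRequest destination;

    // assumed behaviour: the no-arg constructor creates the inner requests itself,
    // so callers such as RestReindexAction no longer have to pass them in
    public ReindexRequestSketch() {
        this(new SearchRequest(), new IndexRequest());
    }

    ReindexRequestSketch(SearchRequest searchRequest, IndexRequest destination) {
        this.searchRequest = searchRequest;
        this.destination = destination;
    }

    public SearchRequest getSearchRequest() {
        return searchRequest;
    }

    public IndexRequest getDestination() {
        return destination;
    }
}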
*/ - UpdateByQueryRequest internal = new UpdateByQueryRequest(new SearchRequest()); + UpdateByQueryRequest internal = new UpdateByQueryRequest(); Map> consumers = new HashMap<>(); consumers.put("conflicts", o -> internal.setConflicts((String) o)); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java index e8e3760882eea..d20be74798066 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java @@ -61,7 +61,8 @@ static Request initialSearch(SearchRequest searchRequest, BytesReference query, if (searchRequest.scroll() != null) { TimeValue keepAlive = searchRequest.scroll().keepAlive(); - if (remoteVersion.before(Version.V_5_0_0)) { + // V_5_0_0 + if (remoteVersion.before(Version.fromId(5000099))) { /* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros * so we toss out that resolution, rounding up because more scroll * timeout seems safer than less. */ @@ -117,7 +118,8 @@ static Request initialSearch(SearchRequest searchRequest, BytesReference query, for (int i = 1; i < searchRequest.source().storedFields().fieldNames().size(); i++) { fields.append(',').append(searchRequest.source().storedFields().fieldNames().get(i)); } - String storedFieldsParamName = remoteVersion.before(Version.V_5_0_0_alpha4) ? "fields" : "stored_fields"; + // V_5_0_0 + String storedFieldsParamName = remoteVersion.before(Version.fromId(5000099)) ? "fields" : "stored_fields"; request.addParameter(storedFieldsParamName, fields.toString()); } @@ -186,7 +188,8 @@ private static String sortToUri(SortBuilder sort) { static Request scroll(String scroll, TimeValue keepAlive, Version remoteVersion) { Request request = new Request("POST", "/_search/scroll"); - if (remoteVersion.before(Version.V_5_0_0)) { + // V_5_0_0 + if (remoteVersion.before(Version.fromId(5000099))) { /* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros * so we toss out that resolution, rounding up so we shouldn't end up * with 0s. 
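The raw ids passed to Version.fromId in the hunks above work because Elasticsearch encodes a version as a single integer: major, minor and revision each take two decimal digits, followed by a build byte where 99 marks a GA release. That is why 5.0.0 becomes 5000099 and 5.0.0-alpha4 becomes 5000004, matching the constants being replaced. A small illustrative helper, not part of this change, that reproduces the layout under that assumption:

public final class VersionIdSketch {
    // id = MMNNRRBB: major, minor, revision, then a build byte (99 = GA release)
    static int toId(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }

    public static void main(String[] args) {
        System.out.println(toId(5, 0, 0, 99)); // 5000099 -> Version.fromId(5000099), i.e. 5.0.0
        System.out.println(toId(5, 0, 0, 4));  // 5000004 -> 5.0.0-alpha4
        System.out.println(toId(5, 0, 0, 3));  // 5000003 -> 5.0.0-alpha3
    }
}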
*/ diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java index 4611f9dcbcddb..ec34da777b533 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.settings.Settings; /** @@ -73,7 +72,7 @@ protected TestAction action() { @Override protected ReindexRequest request() { - return new ReindexRequest(new SearchRequest(), new IndexRequest()); + return new ReindexRequest(); } private class TestAction extends TransportReindexAction.AsyncIndexBySearchAction { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java index 6d3ce558c7567..a90b60357c4ff 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ScriptService; @@ -100,7 +99,7 @@ public void testSetRouting() throws Exception { @Override protected ReindexRequest request() { - return new ReindexRequest(new SearchRequest(), new IndexRequest()); + return new ReindexRequest(); } @Override diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexWithoutContentIT.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexWithoutContentIT.java index f580b1400c3bd..73745ca690d74 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexWithoutContentIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexWithoutContentIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex; +import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; import org.elasticsearch.test.rest.ESRestTestCase; @@ -30,7 +31,7 @@ public class ReindexWithoutContentIT extends ESRestTestCase { public void testReindexMissingBody() throws IOException { ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest( - "POST", "/_reindex")); + new Request("POST", "/_reindex"))); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); assertThat(responseException.getMessage(), containsString("request body is required")); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java index b06948b90581a..70e29ed12c5b4 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.reindex; -import 
org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -144,7 +142,7 @@ public void testReindexFromRemoteRequestParsing() throws IOException { request = BytesReference.bytes(b); } try (XContentParser p = createParser(JsonXContent.jsonXContent, request)) { - ReindexRequest r = new ReindexRequest(new SearchRequest(), new IndexRequest()); + ReindexRequest r = new ReindexRequest(); RestReindexAction.PARSER.parse(p, r, null); assertEquals("localhost", r.getRemoteInfo().getHost()); assertArrayEquals(new String[] {"source"}, r.getSearchRequest().indices()); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java index 97809c9bc8dc3..30621ab607bf2 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.Version; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -47,7 +46,7 @@ */ public class RoundTripTests extends ESTestCase { public void testReindexRequest() throws IOException { - ReindexRequest reindex = new ReindexRequest(new SearchRequest(), new IndexRequest()); + ReindexRequest reindex = new ReindexRequest(); randomRequest(reindex); reindex.getDestination().version(randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED, 12L, 1L, 123124L, 12L)); reindex.getDestination().index("test"); @@ -82,7 +81,7 @@ public void testReindexRequest() throws IOException { } public void testUpdateByQueryRequest() throws IOException { - UpdateByQueryRequest update = new UpdateByQueryRequest(new SearchRequest()); + UpdateByQueryRequest update = new UpdateByQueryRequest(); randomRequest(update); if (randomBoolean()) { update.setPipeline(randomAlphaOfLength(5)); @@ -155,13 +154,8 @@ private void assertRequestEquals(Version version, ReindexRequest request, Reinde assertEquals(request.getRemoteInfo().getUsername(), tripped.getRemoteInfo().getUsername()); assertEquals(request.getRemoteInfo().getPassword(), tripped.getRemoteInfo().getPassword()); assertEquals(request.getRemoteInfo().getHeaders(), tripped.getRemoteInfo().getHeaders()); - if (version.onOrAfter(Version.V_5_2_0)) { - assertEquals(request.getRemoteInfo().getSocketTimeout(), tripped.getRemoteInfo().getSocketTimeout()); - assertEquals(request.getRemoteInfo().getConnectTimeout(), tripped.getRemoteInfo().getConnectTimeout()); - } else { - assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, tripped.getRemoteInfo().getSocketTimeout()); - assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, tripped.getRemoteInfo().getConnectTimeout()); - } + assertEquals(request.getRemoteInfo().getSocketTimeout(), tripped.getRemoteInfo().getSocketTimeout()); + assertEquals(request.getRemoteInfo().getConnectTimeout(), tripped.getRemoteInfo().getConnectTimeout()); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java index b688ce019e3df..d3f62af907d9f 
100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.settings.Settings; public class UpdateByQueryMetadataTests @@ -39,7 +38,7 @@ protected TestAction action() { @Override protected UpdateByQueryRequest request() { - return new UpdateByQueryRequest(new SearchRequest()); + return new UpdateByQueryRequest(); } private class TestAction extends TransportUpdateByQueryAction.AsyncIndexBySearchAction { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java index 4006d16fbcb11..8c9744aa0dd92 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ScriptService; @@ -50,7 +49,7 @@ public void testModifyingCtxNotAllowed() { @Override protected UpdateByQueryRequest request() { - return new UpdateByQueryRequest(new SearchRequest()); + return new UpdateByQueryRequest(); } @Override diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index b51525f20e3c2..2f801811327b8 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -136,13 +136,15 @@ public void testInitialSearchParamsFields() { // Test stored_fields for versions that support it searchRequest = new SearchRequest().source(new SearchSourceBuilder()); searchRequest.source().storedField("_source").storedField("_id"); - remoteVersion = Version.fromId(between(Version.V_5_0_0_alpha4_ID, Version.CURRENT.id)); + // V_5_0_0_alpha4 => current + remoteVersion = Version.fromId(between(5000004, Version.CURRENT.id)); assertThat(initialSearch(searchRequest, query, remoteVersion).getParameters(), hasEntry("stored_fields", "_source,_id")); // Test fields for versions that support it searchRequest = new SearchRequest().source(new SearchSourceBuilder()); searchRequest.source().storedField("_source").storedField("_id"); - remoteVersion = Version.fromId(between(2000099, Version.V_5_0_0_alpha4_ID - 1)); + // V_2_0_0 => V_5_0_0_alpha3 + remoteVersion = Version.fromId(between(2000099, 5000003)); assertThat(initialSearch(searchRequest, query, remoteVersion).getParameters(), hasEntry("fields", "_source,_id")); // Test extra fields for versions that need it @@ -190,7 +192,8 @@ public void testInitialSearchParamsMisc() { } private void assertScroll(Version remoteVersion, Map params, TimeValue requested) { - if (remoteVersion.before(Version.V_5_0_0)) { + // V_5_0_0 + if (remoteVersion.before(Version.fromId(5000099))) { // Versions of Elasticsearch prior to 5.0 can't parse nanos or micros 
in TimeValue. assertThat(params.get("scroll"), not(either(endsWith("nanos")).or(endsWith("micros")))); if (requested.getStringRep().endsWith("nanos") || requested.getStringRep().endsWith("micros")) { @@ -242,7 +245,7 @@ public void testScrollParams() { public void testScrollEntity() throws IOException { String scroll = randomAlphaOfLength(30); - HttpEntity entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.V_5_0_0).getEntity(); + HttpEntity entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.fromString("5.0.0")).getEntity(); assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); assertThat(Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)), containsString("\"" + scroll + "\"")); @@ -255,7 +258,7 @@ public void testScrollEntity() throws IOException { public void testClearScroll() throws IOException { String scroll = randomAlphaOfLength(30); - Request request = clearScroll(scroll, Version.V_5_0_0); + Request request = clearScroll(scroll, Version.fromString("5.0.0")); assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType().getValue()); assertThat(Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)), containsString("\"" + scroll + "\"")); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index 92f370f8f6364..d3d3cefea45e1 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -150,13 +150,15 @@ public void testLookupRemoteVersion() throws Exception { assertTrue(called.get()); called.set(false); sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, "main/5_0_0_alpha_3.json").lookupRemoteVersion(v -> { - assertEquals(Version.V_5_0_0_alpha3, v); + // V_5_0_0_alpha3 + assertEquals(Version.fromId(5000003), v); called.set(true); }); assertTrue(called.get()); called.set(false); sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, "main/with_unknown_fields.json").lookupRemoteVersion(v -> { - assertEquals(Version.V_5_0_0_alpha3, v); + // V_5_0_0_alpha3 + assertEquals(Version.fromId(5000003), v); called.set(true); }); assertTrue(called.get()); diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 12ce5ce7d4a8f..e7c36ff506ed3 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -83,7 +83,6 @@ thirdPartyAudit.excludes = [ 'io.netty.internal.tcnative.SSLContext', // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) - 'org.bouncycastle.asn1.x500.X500Name', 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', 'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder', @@ -163,3 +162,11 @@ thirdPartyAudit.excludes = [ 'org.conscrypt.Conscrypt', 'org.conscrypt.HandshakeListener' ] + +if (project.inFipsJvm == false) { + // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in + // a FIPS JVM with BouncyCastleFIPS Provider + thirdPartyAudit.excludes += [ + 'org.bouncycastle.asn1.x500.X500Name' + ] +} diff --git 
a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java index 981a417449f14..73135c2a14560 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java @@ -21,11 +21,11 @@ import io.netty.channel.Channel; import io.netty.channel.ChannelPromise; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.concurrent.CompletableContext; import org.elasticsearch.http.HttpChannel; import org.elasticsearch.http.HttpResponse; -import org.elasticsearch.transport.netty4.Netty4Utils; import java.net.InetSocketAddress; @@ -42,7 +42,7 @@ public class Netty4HttpChannel implements HttpChannel { } else { Throwable cause = f.cause(); if (cause instanceof Error) { - Netty4Utils.maybeDie(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); closeContext.completeExceptionally(new Exception(cause)); } else { closeContext.completeExceptionally((Exception) cause); @@ -59,7 +59,7 @@ public void sendResponse(HttpResponse response, ActionListener listener) { listener.onResponse(null); } else { final Throwable cause = f.cause(); - Netty4Utils.maybeDie(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); if (cause instanceof Error) { listener.onFailure(new Exception(cause)); } else { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java index ab078ad10d337..472e34d09fc40 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java @@ -27,7 +27,6 @@ import io.netty.handler.codec.http.FullHttpRequest; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.http.HttpPipelinedRequest; -import org.elasticsearch.transport.netty4.Netty4Utils; @ChannelHandler.Sharable class Netty4HttpRequestHandler extends SimpleChannelInboundHandler> { @@ -58,7 +57,7 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest listener) listener.onResponse(null); } else { final Throwable cause = f.cause(); - Netty4Utils.maybeDie(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); if (cause instanceof Error) { listener.onFailure(new Exception(cause)); } else { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java index 873a6c33fba11..9ef3f296f0601 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java @@ -20,6 +20,7 @@ package org.elasticsearch.transport.netty4; import io.netty.channel.Channel; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.concurrent.CompletableContext; import org.elasticsearch.transport.TcpServerChannel; @@ -41,7 +42,7 @@ public class Netty4TcpServerChannel implements TcpServerChannel { } else { Throwable cause = f.cause(); if 
(cause instanceof Error) { - Netty4Utils.maybeDie(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); closeContext.completeExceptionally(new Exception(cause)); } else { closeContext.completeExceptionally((Exception) cause); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index e310f3012a9fb..0edd12a44e8c1 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -38,7 +38,9 @@ import io.netty.util.concurrent.Future; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -222,11 +224,12 @@ protected ChannelHandler getClientChannelInitializer() { static final AttributeKey SERVER_CHANNEL_KEY = AttributeKey.newInstance("es-server-channel"); @Override - protected Netty4TcpChannel initiateChannel(InetSocketAddress address, ActionListener listener) throws IOException { + protected Netty4TcpChannel initiateChannel(DiscoveryNode node, ActionListener listener) throws IOException { + InetSocketAddress address = node.getAddress().address(); ChannelFuture channelFuture = bootstrap.connect(address); Channel channel = channelFuture.channel(); if (channel == null) { - Netty4Utils.maybeDie(channelFuture.cause()); + ExceptionsHelper.maybeDieOnAnotherThread(channelFuture.cause()); throw new IOException(channelFuture.cause()); } addClosedExceptionLogger(channel); @@ -240,7 +243,7 @@ protected Netty4TcpChannel initiateChannel(InetSocketAddress address, ActionList } else { Throwable cause = f.cause(); if (cause instanceof Error) { - Netty4Utils.maybeDie(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); listener.onFailure(new Exception(cause)); } else { listener.onFailure((Exception) cause); @@ -305,7 +308,7 @@ protected void initChannel(Channel ch) throws Exception { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - Netty4Utils.maybeDie(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); super.exceptionCaught(ctx, cause); } } @@ -331,7 +334,7 @@ protected void initChannel(Channel ch) throws Exception { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - Netty4Utils.maybeDie(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); super.exceptionCaught(ctx, cause); } } @@ -349,7 +352,7 @@ private class ServerChannelExceptionHandler extends ChannelHandlerAdapter { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - Netty4Utils.maybeDie(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); Netty4TcpServerChannel serverChannel = ctx.channel().attr(SERVER_CHANNEL_KEY).get(); if (cause instanceof Error) { onServerException(serverChannel, new Exception(cause)); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java index 9470424b381e6..655dafdd28981 
100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java @@ -27,20 +27,16 @@ import io.netty.util.NettyRuntime; import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; -import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.ESLoggerFactory; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Locale; -import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; public class Netty4Utils { @@ -161,34 +157,4 @@ public static void closeChannels(final Collection channels) throws IOEx } } - /** - * If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be - * caught and bubbles up to the uncaught exception handler. - * - * @param cause the throwable to test - */ - public static void maybeDie(final Throwable cause) { - final Logger logger = ESLoggerFactory.getLogger(Netty4Utils.class); - final Optional maybeError = ExceptionsHelper.maybeError(cause, logger); - if (maybeError.isPresent()) { - /* - * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many - * invocations of user-code in try/catch blocks that swallow all throwables. This means that a rethrow here will not bubble up - * to where we want it to. So, we fork a thread and throw the exception from there where Netty can not get to it. We do not wrap - * the exception so as to not lose the original cause during exit. 
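The helper deleted below (and replaced at every call site by ExceptionsHelper.maybeDieOnAnotherThread) exists for the reason this comment spells out: Netty wraps user code in catch-all blocks, so a fatal Error has to be rethrown from a thread Netty does not control. A stripped-down sketch of that idea, assuming a plain instanceof check in place of the real ExceptionsHelper.maybeError and logging plumbing:

final class DieOnAnotherThreadSketch {
    // rethrow fatal errors from a fresh thread so Netty's catch-all handlers cannot swallow them
    static void maybeDieOnAnotherThread(Throwable cause) {
        if (cause instanceof Error) {
            final Error error = (Error) cause;
            new Thread(() -> {
                throw error;
            }).start();
        }
    }
}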
- */ - try { - // try to log the current stack trace - final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace()); - logger.error("fatal error on the network layer\n{}", formatted); - } finally { - new Thread( - () -> { - throw maybeError.get(); - }) - .start(); - } - } - } - } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java index 17a62b3a440ef..cfda71f10096e 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java @@ -32,7 +32,6 @@ import java.io.IOException; import java.nio.charset.Charset; -import java.util.Collections; import java.util.Map; import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; @@ -71,7 +70,7 @@ public void testBadRequest() throws IOException { final ResponseException e = expectThrows( ResponseException.class, - () -> client().performRequest(randomFrom("GET", "POST", "PUT"), path, Collections.emptyMap())); + () -> client().performRequest(new Request(randomFrom("GET", "POST", "PUT"), path))); assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(BAD_REQUEST.getStatus())); assertThat(e, hasToString(containsString("too_long_frame_exception"))); assertThat(e, hasToString(matches("An HTTP line is larger than \\d+ bytes"))); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index 8d628ace2ee38..9d6f016086c26 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -87,13 +86,6 @@ protected MockTransportService build(Settings settings, Version version, Cluster return transportService; } - @Override - protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException { - final Netty4Transport t = (Netty4Transport) transport; - final TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection; - CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true); - } - public void testConnectException() throws UnknownHostException { try { serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index 1883e3bf1b9d6..a42a28cad4e25 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -1,3 +1,5 @@ +import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask + /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -20,9 +22,10 @@ esplugin { description 'The ICU Analysis plugin integrates Lucene ICU module into elasticsearch, adding ICU relates analysis components.' classname 'org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin' + hasClientJar = true } -forbiddenApis { +tasks.withType(ForbiddenApisCliTask) { signatures += [ "com.ibm.icu.text.Collator#getInstance() @ Don't use default locale, use getInstance(ULocale) instead" ] diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java index 0b84f538dcd79..3f8b9296aa02c 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java @@ -22,6 +22,7 @@ import com.ibm.icu.lang.UCharacter; import com.ibm.icu.lang.UProperty; import com.ibm.icu.lang.UScript; +import com.ibm.icu.text.BreakIterator; import com.ibm.icu.text.RuleBasedBreakIterator; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.icu.segmentation.DefaultICUTokenizerConfig; @@ -79,7 +80,7 @@ private ICUTokenizerConfig getIcuConfig(Environment env, Settings settings) { if (tailored.isEmpty()) { return null; } else { - final RuleBasedBreakIterator breakers[] = new RuleBasedBreakIterator[UScript.CODE_LIMIT]; + final BreakIterator breakers[] = new BreakIterator[UScript.CODE_LIMIT]; for (Map.Entry entry : tailored.entrySet()) { int code = entry.getKey(); String resourcePath = entry.getValue(); @@ -104,7 +105,7 @@ public RuleBasedBreakIterator getBreakIterator(int script) { } //parse a single RBBi rule file - private RuleBasedBreakIterator parseRules(String filename, Environment env) throws IOException { + private BreakIterator parseRules(String filename, Environment env) throws IOException { final Path path = env.configFile().resolve(filename); String rules = Files.readAllLines(path) diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java index c4c44222f470e..0235e6e81368f 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java @@ -25,7 +25,6 @@ import com.ibm.icu.util.ULocale; import org.apache.lucene.document.Field; -import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; @@ -35,7 +34,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; @@ -56,7 +54,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.function.BiFunction; import java.util.function.LongSupplier; public class ICUCollationKeywordFieldMapper extends FieldMapper { @@ -571,7 +568,6 @@ public static class TypeParser implements Mapper.TypeParser { private final String variableTop; private final boolean 
hiraganaQuaternaryMode; private final Collator collator; - private final BiFunction getDVField; protected ICUCollationKeywordFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, String rules, String language, @@ -593,11 +589,6 @@ protected ICUCollationKeywordFieldMapper(String simpleName, MappedFieldType fiel this.variableTop = variableTop; this.hiraganaQuaternaryMode = hiraganaQuaternaryMode; this.collator = collator; - if (indexCreatedVersion.onOrAfter(Version.V_5_6_0)) { - getDVField = SortedSetDocValuesField::new; - } else { - getDVField = SortedDocValuesField::new; - } } @Override @@ -754,7 +745,7 @@ protected void parseCreateField(ParseContext context, List field } if (fieldType().hasDocValues()) { - fields.add(getDVField.apply(fieldType().name(), binaryValue)); + fields.add(new SortedSetDocValuesField(fieldType().name(), binaryValue)); } else if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { createFieldNamesField(context, fields); } diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java index fff255970113d..f39ae886dc45b 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java @@ -28,11 +28,9 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; @@ -106,50 +104,6 @@ public void testDefaults() throws Exception { assertEquals(DocValuesType.SORTED_SET, fieldType.docValuesType()); } - public void testBackCompat() throws Exception { - indexService = createIndex("oldindex", Settings.builder().put("index.version.created", Version.V_5_5_0).build()); - parser = indexService.mapperService().documentMapperParser(); - - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", FIELD_TYPE).endObject().endObject() - .endObject().endObject()); - - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); - - assertEquals(mapping, mapper.mappingSource().toString()); - - ParsedDocument doc = mapper.parse(SourceToParse.source("oldindex", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", "1234") - .endObject()), - XContentType.JSON)); - - IndexableField[] fields = doc.rootDoc().getFields("field"); - assertEquals(2, fields.length); - - Collator collator = Collator.getInstance(ULocale.ROOT); - RawCollationKey key = collator.getRawCollationKey("1234", null); - BytesRef expected = new BytesRef(key.bytes, 0, key.size); - - assertEquals(expected, fields[0].binaryValue()); - IndexableFieldType fieldType = fields[0].fieldType(); - assertThat(fieldType.omitNorms(), equalTo(true)); - 
assertFalse(fieldType.tokenized()); - assertFalse(fieldType.stored()); - assertThat(fieldType.indexOptions(), equalTo(IndexOptions.DOCS)); - assertThat(fieldType.storeTermVectors(), equalTo(false)); - assertThat(fieldType.storeTermVectorOffsets(), equalTo(false)); - assertThat(fieldType.storeTermVectorPositions(), equalTo(false)); - assertThat(fieldType.storeTermVectorPayloads(), equalTo(false)); - assertEquals(DocValuesType.NONE, fieldType.docValuesType()); - - assertEquals(expected, fields[1].binaryValue()); - fieldType = fields[1].fieldType(); - assertThat(fieldType.indexOptions(), equalTo(IndexOptions.NONE)); - assertEquals(DocValuesType.SORTED, fieldType.docValuesType()); - } - public void testNullValue() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", FIELD_TYPE).endObject().endObject() diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 6f177f7b7f5b2..3dae3d3642c54 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -128,7 +128,7 @@ thirdPartyAudit.excludes = [ ] // jarhell with jdk (intentionally, because jaxb was removed from default modules in java 9) -if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion <= JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += [ 'javax.xml.bind.Binder', 'javax.xml.bind.ContextFinder$1', diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index b1c3b62fd6edf..e32ba6948d62d 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -87,7 +87,7 @@ thirdPartyAudit.excludes = [ 'org.apache.log.Logger', ] -if (JavaVersion.current() > JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion > JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += [ 'javax.xml.bind.DatatypeConverter', 'javax.xml.bind.JAXBContext' diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java index 4d26447078597..48fa49b9a8a35 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java @@ -19,39 +19,33 @@ package org.elasticsearch.discovery.file; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.transport.TransportService; -import java.nio.file.Path; import java.util.Collections; import java.util.Map; import java.util.function.Supplier; -/** - * Plugin for providing file-based unicast hosts discovery. The list of unicast hosts - * is obtained by reading the {@link FileBasedUnicastHostsProvider#UNICAST_HOSTS_FILE} in - * the {@link Environment#configFile()}/discovery-file directory. 
- */ public class FileBasedDiscoveryPlugin extends Plugin implements DiscoveryPlugin { - private final Settings settings; - private final Path configPath; + private final DeprecationLogger deprecationLogger; + static final String DEPRECATION_MESSAGE + = "File-based discovery is now built into Elasticsearch and does not require the discovery-file plugin"; - public FileBasedDiscoveryPlugin(Settings settings, Path configPath) { - this.settings = settings; - this.configPath = configPath; + public FileBasedDiscoveryPlugin(Settings settings) { + deprecationLogger = new DeprecationLogger(Loggers.getLogger(this.getClass(), settings)); } @Override public Map> getZenHostsProviders(TransportService transportService, NetworkService networkService) { - return Collections.singletonMap( - "file", - () -> new FileBasedUnicastHostsProvider(new Environment(settings, configPath))); + deprecationLogger.deprecated(DEPRECATION_MESSAGE); + return Collections.emptyMap(); } } diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java deleted file mode 100644 index 584ae4de5a2b5..0000000000000 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.discovery.file; - -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.env.Environment; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.NoSuchFileException; -import java.nio.file.Path; -import java.util.Collections; -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -/** - * An implementation of {@link UnicastHostsProvider} that reads hosts/ports - * from {@link #UNICAST_HOSTS_FILE}. - * - * Each unicast host/port that is part of the discovery process must be listed on - * a separate line. If the port is left off an entry, a default port of 9300 is - * assumed. 
An example unicast hosts file could read: - * - * 67.81.244.10 - * 67.81.244.11:9305 - * 67.81.244.15:9400 - */ -class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { - - static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt"; - - private final Path unicastHostsFilePath; - - FileBasedUnicastHostsProvider(Environment environment) { - super(environment.settings()); - this.unicastHostsFilePath = environment.configFile().resolve("discovery-file").resolve(UNICAST_HOSTS_FILE); - } - - @Override - public List<TransportAddress> buildDynamicHosts(HostsResolver hostsResolver) { - List<String> hostsList; - try (Stream<String> lines = Files.lines(unicastHostsFilePath)) { - hostsList = lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments - .collect(Collectors.toList()); - } catch (FileNotFoundException | NoSuchFileException e) { - logger.warn((Supplier<?>) () -> new ParameterizedMessage("[discovery-file] Failed to find unicast hosts file [{}]", - unicastHostsFilePath), e); - hostsList = Collections.emptyList(); - } catch (IOException e) { - logger.warn((Supplier<?>) () -> new ParameterizedMessage("[discovery-file] Error reading unicast hosts file [{}]", - unicastHostsFilePath), e); - hostsList = Collections.emptyList(); - } - - final List<TransportAddress> dynamicHosts = hostsResolver.resolveHosts(hostsList, 1); - logger.debug("[discovery-file] Using dynamic discovery nodes {}", dynamicHosts); - return dynamicHosts; - } - -} diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java new file mode 100644 index 0000000000000..643c7b2c95c27 --- /dev/null +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.discovery.file; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.discovery.file.FileBasedDiscoveryPlugin.DEPRECATION_MESSAGE; + +public class FileBasedDiscoveryPluginDeprecationTests extends ESTestCase { + public void testDeprecationWarning() { + new FileBasedDiscoveryPlugin(Settings.EMPTY).getZenHostsProviders(null, null); + assertWarnings(DEPRECATION_MESSAGE); + } +} diff --git a/plugins/examples/custom-settings/build.gradle b/plugins/examples/custom-settings/build.gradle index e0e728cec2427..3caf29c8513b5 100644 --- a/plugins/examples/custom-settings/build.gradle +++ b/plugins/examples/custom-settings/build.gradle @@ -16,13 +16,14 @@ * specific language governing permissions and limitations * under the License. */ - apply plugin: 'elasticsearch.esplugin' esplugin { name 'custom-settings' description 'An example plugin showing how to register custom settings' classname 'org.elasticsearch.example.customsettings.ExampleCustomSettingsPlugin' + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } integTestCluster { diff --git a/plugins/examples/custom-suggester/build.gradle b/plugins/examples/custom-suggester/build.gradle index b36d5cd218d27..977e467391d8b 100644 --- a/plugins/examples/custom-suggester/build.gradle +++ b/plugins/examples/custom-suggester/build.gradle @@ -23,6 +23,8 @@ esplugin { name 'custom-suggester' description 'An example plugin showing how to write and register a custom suggester' classname 'org.elasticsearch.example.customsuggester.CustomSuggesterPlugin' + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } integTestCluster { @@ -30,4 +32,4 @@ integTestCluster { } // this plugin has no unit tests, only rest tests -tasks.test.enabled = false \ No newline at end of file +tasks.test.enabled = false diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index ef1ca7d741e9a..cb2aeb82e9d01 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -16,7 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - apply plugin: 'elasticsearch.esplugin' esplugin { @@ -24,10 +23,12 @@ esplugin { description 'An example whitelisting additional classes and methods in painless' classname 'org.elasticsearch.example.painlesswhitelist.MyWhitelistPlugin' extendedPlugins = ['lang-painless'] + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } dependencies { - compileOnly project(':modules:lang-painless') + compileOnly "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${versions.elasticsearch}" } if (System.getProperty('tests.distribution') == null) { diff --git a/plugins/examples/rescore/build.gradle b/plugins/examples/rescore/build.gradle index 4adeb0c721baf..cdecd760c81e8 100644 --- a/plugins/examples/rescore/build.gradle +++ b/plugins/examples/rescore/build.gradle @@ -16,11 +16,13 @@ * specific language governing permissions and limitations * under the License. 
*/ - apply plugin: 'elasticsearch.esplugin' esplugin { name 'example-rescore' description 'An example plugin implementing rescore and verifying that plugins *can* implement rescore' classname 'org.elasticsearch.example.rescore.ExampleRescorePlugin' + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } + diff --git a/plugins/examples/rest-handler/build.gradle b/plugins/examples/rest-handler/build.gradle index cfe84e6a45a93..eff2fd1b6c6e4 100644 --- a/plugins/examples/rest-handler/build.gradle +++ b/plugins/examples/rest-handler/build.gradle @@ -16,13 +16,14 @@ * specific language governing permissions and limitations * under the License. */ - apply plugin: 'elasticsearch.esplugin' esplugin { name 'rest-handler' description 'An example plugin showing how to register a REST handler' classname 'org.elasticsearch.example.resthandler.ExampleRestHandlerPlugin' + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } // No unit tests in this example @@ -40,4 +41,4 @@ integTestCluster { } integTestRunner { systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }" -} +} \ No newline at end of file diff --git a/plugins/examples/script-expert-scoring/build.gradle b/plugins/examples/script-expert-scoring/build.gradle index 7c602d9bc027d..e9da62acdcff4 100644 --- a/plugins/examples/script-expert-scoring/build.gradle +++ b/plugins/examples/script-expert-scoring/build.gradle @@ -16,13 +16,15 @@ * specific language governing permissions and limitations * under the License. */ - apply plugin: 'elasticsearch.esplugin' esplugin { name 'script-expert-scoring' description 'An example script engine to use low level Lucene internals for expert scoring' classname 'org.elasticsearch.example.expertscript.ExpertScriptPlugin' + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } test.enabled = false + diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 1a6aa809de040..f55104f2a96fc 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -2106,7 +2106,27 @@ thirdPartyAudit.excludes = [ 'ucar.nc2.dataset.NetcdfDataset' ] -if (JavaVersion.current() > JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion == JavaVersion.VERSION_1_8) { + thirdPartyAudit.excludes += [ + // TODO: Why is this needed ? + 'com.sun.javadoc.ClassDoc', + 'com.sun.javadoc.Doc', + 'com.sun.javadoc.Doclet', + 'com.sun.javadoc.ExecutableMemberDoc', + 'com.sun.javadoc.FieldDoc', + 'com.sun.javadoc.MethodDoc', + 'com.sun.javadoc.PackageDoc', + 'com.sun.javadoc.Parameter', + 'com.sun.javadoc.ProgramElementDoc', + 'com.sun.javadoc.RootDoc', + 'com.sun.javadoc.SourcePosition', + 'com.sun.javadoc.Tag', + 'com.sun.javadoc.Type', + 'com.sun.tools.javadoc.Main' + ] +} + +if (project.runtimeJavaVersion > JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += [ 'javax.activation.ActivationDataFlavor', 'javax.activation.CommandMap', @@ -2121,3 +2141,9 @@ if (JavaVersion.current() > JavaVersion.VERSION_1_8) { 'javax.xml.bind.Unmarshaller' ] } + +if (project.inFipsJvm) { + // FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit, + // rather than provide a long list of exclusions, disable the check on FIPS.
+ thirdPartyAudit.enabled = false +} diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index a6dc27b1f8a1c..50af824fae9bd 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.hash.MurmurHash3; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -93,10 +92,6 @@ public static class TypeParser implements Mapper.TypeParser { throw new MapperParsingException("Setting [index] cannot be modified for field [" + name + "]"); } - if (parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha2)) { - node.remove("precision_step"); - } - TypeParsers.parseField(builder, name, node, parserContext); return builder; diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java index 04ab7ecd245f6..ac5afeb3a1094 100644 --- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java @@ -82,10 +82,6 @@ public Builder enabled(EnabledAttributeMapper enabled) { @Override public SizeFieldMapper build(BuilderContext context) { setupFieldType(context); - if (context.indexCreatedVersion().onOrBefore(Version.V_5_0_0_alpha4)) { - // Make sure that the doc_values are disabled on indices created before V_5_0_0_alpha4 - fieldType.setHasDocValues(false); - } return new SizeFieldMapper(enabledState, fieldType, context.indexSettings()); } } diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 07ef4b4be5e62..510c101379d2f 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -23,28 +23,38 @@ esplugin { } dependencies { - compile 'com.google.cloud:google-cloud-storage:1.28.0' - compile 'com.google.cloud:google-cloud-core:1.28.0' - compile 'com.google.cloud:google-cloud-core-http:1.28.0' - compile 'com.google.auth:google-auth-library-oauth2-http:0.9.1' - compile 'com.google.auth:google-auth-library-credentials:0.9.1' - compile 'com.google.oauth-client:google-oauth-client:1.23.0' - compile 'com.google.http-client:google-http-client:1.23.0' - compile 'com.google.http-client:google-http-client-jackson:1.23.0' - compile 'com.google.http-client:google-http-client-jackson2:1.23.0' - compile 'com.google.http-client:google-http-client-appengine:1.23.0' - compile 'com.google.api-client:google-api-client:1.23.0' - compile 'com.google.api:gax:1.25.0' - compile 'com.google.api:gax-httpjson:0.40.0' - compile 'com.google.api:api-common:1.5.0' - compile 'com.google.api.grpc:proto-google-common-protos:1.8.0' + compile 'com.google.cloud:google-cloud-storage:1.40.0' + compile 'com.google.cloud:google-cloud-core:1.40.0' compile 'com.google.guava:guava:20.0' - compile 'com.google.apis:google-api-services-storage:v1-rev115-1.23.0' - compile 'org.codehaus.jackson:jackson-core-asl:1.9.13' - compile 
'io.grpc:grpc-context:1.9.0' - compile 'io.opencensus:opencensus-api:0.11.1' - compile 'io.opencensus:opencensus-contrib-http-util:0.11.1' - compile 'org.threeten:threetenbp:1.3.6' + compile 'joda-time:joda-time:2.10' + compile 'com.google.http-client:google-http-client:1.24.1' + compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" + compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "commons-codec:commons-codec:${versions.commonscodec}" + compile 'com.google.api:api-common:1.7.0' + compile 'com.google.api:gax:1.30.0' + compile 'org.threeten:threetenbp:1.3.3' + compile 'com.google.protobuf:protobuf-java-util:3.6.0' + compile 'com.google.protobuf:protobuf-java:3.6.0' + compile 'com.google.code.gson:gson:2.7' + compile 'com.google.api.grpc:proto-google-common-protos:1.12.0' + compile 'com.google.api.grpc:proto-google-iam-v1:0.12.0' + compile 'com.google.cloud:google-cloud-core-http:1.40.0' + compile 'com.google.auth:google-auth-library-credentials:0.10.0' + compile 'com.google.auth:google-auth-library-oauth2-http:0.10.0' + compile 'com.google.oauth-client:google-oauth-client:1.24.1' + compile 'com.google.api-client:google-api-client:1.24.1' + compile 'com.google.http-client:google-http-client-appengine:1.24.1' + compile 'com.google.http-client:google-http-client-jackson:1.24.1' + compile 'org.codehaus.jackson:jackson-core-asl:1.9.11' + compile 'com.google.http-client:google-http-client-jackson2:1.24.1' + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + compile 'com.google.api:gax-httpjson:0.47.0' + compile 'io.opencensus:opencensus-api:0.15.0' + compile 'io.grpc:grpc-context:1.12.0' + compile 'io.opencensus:opencensus-contrib-http-util:0.15.0' + compile 'com.google.apis:google-api-services-storage:v1-rev135-1.24.1' } dependencyLicenses { @@ -52,10 +62,18 @@ dependencyLicenses { mapping from: /google-auth-.*/, to: 'google-auth' mapping from: /google-http-.*/, to: 'google-http' mapping from: /opencensus.*/, to: 'opencensus' + mapping from: /jackson-.*/, to: 'jackson' + mapping from: /http.*/, to: 'httpclient' + mapping from: /protobuf.*/, to: 'protobuf' + mapping from: /proto-google.*/, to: 'proto-google' } thirdPartyAudit.excludes = [ // uses internal java api: sun.misc.Unsafe + 'com.google.protobuf.UnsafeUtil', + 'com.google.protobuf.UnsafeUtil$1', + 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', + 'com.google.protobuf.UnsafeUtil$MemoryAccessor', 'com.google.common.cache.Striped64', 'com.google.common.cache.Striped64$1', 'com.google.common.cache.Striped64$Cell', @@ -87,139 +105,13 @@ thirdPartyAudit.excludes = [ 'com.google.appengine.api.urlfetch.HTTPResponse', 'com.google.appengine.api.urlfetch.URLFetchService', 'com.google.appengine.api.urlfetch.URLFetchServiceFactory', - 'com.google.gson.Gson', - 'com.google.gson.GsonBuilder', - 'com.google.gson.TypeAdapter', - 'com.google.gson.stream.JsonReader', - 'com.google.gson.stream.JsonWriter', - 'com.google.iam.v1.Binding$Builder', - 'com.google.iam.v1.Binding', - 'com.google.iam.v1.Policy$Builder', - 'com.google.iam.v1.Policy', - 'com.google.protobuf.AbstractMessageLite$Builder', - 'com.google.protobuf.AbstractParser', - 'com.google.protobuf.Any$Builder', - 'com.google.protobuf.Any', - 'com.google.protobuf.AnyOrBuilder', - 'com.google.protobuf.AnyProto', - 'com.google.protobuf.Api$Builder', - 'com.google.protobuf.Api', - 'com.google.protobuf.ApiOrBuilder', - 'com.google.protobuf.ApiProto', - 
'com.google.protobuf.ByteString', - 'com.google.protobuf.CodedInputStream', - 'com.google.protobuf.CodedOutputStream', - 'com.google.protobuf.DescriptorProtos', - 'com.google.protobuf.Descriptors$Descriptor', - 'com.google.protobuf.Descriptors$EnumDescriptor', - 'com.google.protobuf.Descriptors$EnumValueDescriptor', - 'com.google.protobuf.Descriptors$FieldDescriptor', - 'com.google.protobuf.Descriptors$FileDescriptor$InternalDescriptorAssigner', - 'com.google.protobuf.Descriptors$FileDescriptor', - 'com.google.protobuf.Descriptors$OneofDescriptor', - 'com.google.protobuf.Duration$Builder', - 'com.google.protobuf.Duration', - 'com.google.protobuf.DurationOrBuilder', - 'com.google.protobuf.DurationProto', - 'com.google.protobuf.EmptyProto', - 'com.google.protobuf.Enum$Builder', - 'com.google.protobuf.Enum', - 'com.google.protobuf.EnumOrBuilder', - 'com.google.protobuf.ExtensionRegistry', - 'com.google.protobuf.ExtensionRegistryLite', - 'com.google.protobuf.FloatValue$Builder', - 'com.google.protobuf.FloatValue', - 'com.google.protobuf.FloatValueOrBuilder', - 'com.google.protobuf.GeneratedMessage$GeneratedExtension', - 'com.google.protobuf.GeneratedMessage', - 'com.google.protobuf.GeneratedMessageV3$Builder', - 'com.google.protobuf.GeneratedMessageV3$BuilderParent', - 'com.google.protobuf.GeneratedMessageV3$FieldAccessorTable', - 'com.google.protobuf.GeneratedMessageV3', - 'com.google.protobuf.Internal$EnumLite', - 'com.google.protobuf.Internal$EnumLiteMap', - 'com.google.protobuf.Internal', - 'com.google.protobuf.InvalidProtocolBufferException', - 'com.google.protobuf.LazyStringArrayList', - 'com.google.protobuf.LazyStringList', - 'com.google.protobuf.MapEntry$Builder', - 'com.google.protobuf.MapEntry', - 'com.google.protobuf.MapField', - 'com.google.protobuf.Message', - 'com.google.protobuf.MessageOrBuilder', - 'com.google.protobuf.Parser', - 'com.google.protobuf.ProtocolMessageEnum', - 'com.google.protobuf.ProtocolStringList', - 'com.google.protobuf.RepeatedFieldBuilderV3', - 'com.google.protobuf.SingleFieldBuilderV3', - 'com.google.protobuf.Struct$Builder', - 'com.google.protobuf.Struct', - 'com.google.protobuf.StructOrBuilder', - 'com.google.protobuf.StructProto', - 'com.google.protobuf.Timestamp$Builder', - 'com.google.protobuf.Timestamp', - 'com.google.protobuf.TimestampProto', - 'com.google.protobuf.Type$Builder', - 'com.google.protobuf.Type', - 'com.google.protobuf.TypeOrBuilder', - 'com.google.protobuf.TypeProto', - 'com.google.protobuf.UInt32Value$Builder', - 'com.google.protobuf.UInt32Value', - 'com.google.protobuf.UInt32ValueOrBuilder', - 'com.google.protobuf.UnknownFieldSet$Builder', - 'com.google.protobuf.UnknownFieldSet', - 'com.google.protobuf.WireFormat$FieldType', - 'com.google.protobuf.WrappersProto', - 'com.google.protobuf.util.Timestamps', - 'org.apache.http.ConnectionReuseStrategy', - 'org.apache.http.Header', - 'org.apache.http.HttpEntity', - 'org.apache.http.HttpEntityEnclosingRequest', - 'org.apache.http.HttpHost', - 'org.apache.http.HttpRequest', - 'org.apache.http.HttpResponse', - 'org.apache.http.HttpVersion', - 'org.apache.http.RequestLine', - 'org.apache.http.StatusLine', - 'org.apache.http.client.AuthenticationHandler', - 'org.apache.http.client.HttpClient', - 'org.apache.http.client.HttpRequestRetryHandler', - 'org.apache.http.client.RedirectHandler', - 'org.apache.http.client.RequestDirector', - 'org.apache.http.client.UserTokenHandler', - 'org.apache.http.client.methods.HttpDelete', - 'org.apache.http.client.methods.HttpEntityEnclosingRequestBase', - 
'org.apache.http.client.methods.HttpGet', - 'org.apache.http.client.methods.HttpHead', - 'org.apache.http.client.methods.HttpOptions', - 'org.apache.http.client.methods.HttpPost', - 'org.apache.http.client.methods.HttpPut', - 'org.apache.http.client.methods.HttpRequestBase', - 'org.apache.http.client.methods.HttpTrace', - 'org.apache.http.conn.ClientConnectionManager', - 'org.apache.http.conn.ConnectionKeepAliveStrategy', - 'org.apache.http.conn.params.ConnManagerParams', - 'org.apache.http.conn.params.ConnPerRouteBean', - 'org.apache.http.conn.params.ConnRouteParams', - 'org.apache.http.conn.routing.HttpRoutePlanner', - 'org.apache.http.conn.scheme.PlainSocketFactory', - 'org.apache.http.conn.scheme.Scheme', - 'org.apache.http.conn.scheme.SchemeRegistry', - 'org.apache.http.conn.ssl.SSLSocketFactory', - 'org.apache.http.conn.ssl.X509HostnameVerifier', - 'org.apache.http.entity.AbstractHttpEntity', - 'org.apache.http.impl.client.DefaultHttpClient', - 'org.apache.http.impl.client.DefaultHttpRequestRetryHandler', - 'org.apache.http.impl.conn.ProxySelectorRoutePlanner', - 'org.apache.http.impl.conn.tsccm.ThreadSafeClientConnManager', - 'org.apache.http.message.BasicHttpResponse', - 'org.apache.http.params.BasicHttpParams', - 'org.apache.http.params.HttpConnectionParams', - 'org.apache.http.params.HttpParams', - 'org.apache.http.params.HttpProtocolParams', - 'org.apache.http.protocol.HttpContext', - 'org.apache.http.protocol.HttpProcessor', - 'org.apache.http.protocol.HttpRequestExecutor' + // commons-logging optional dependencies + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + // commons-logging provided dependencies + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener' ] check { diff --git a/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 b/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 deleted file mode 100644 index 64435356e5eaf..0000000000000 --- a/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e537338d40a57ad469239acb6d828fa544fb52b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/api-common-1.7.0.jar.sha1 b/plugins/repository-gcs/licenses/api-common-1.7.0.jar.sha1 new file mode 100644 index 0000000000000..67291b658e5c5 --- /dev/null +++ b/plugins/repository-gcs/licenses/api-common-1.7.0.jar.sha1 @@ -0,0 +1 @@ +ea59fb8b2450999345035dec8a6f472543391766 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 new file mode 100644 index 0000000000000..3fe8682a1b0f9 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 @@ -0,0 +1 @@ +4b95f4897fa13f2cd904aee711aeafc0c5295cd8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/old/commons-codec-LICENSE.txt b/plugins/repository-gcs/licenses/commons-codec-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/commons-codec-LICENSE.txt rename to plugins/repository-gcs/licenses/commons-codec-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/old/commons-codec-NOTICE.txt b/plugins/repository-gcs/licenses/commons-codec-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/commons-codec-NOTICE.txt rename to plugins/repository-gcs/licenses/commons-codec-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 
b/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 new file mode 100644 index 0000000000000..5b8f029e58293 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 @@ -0,0 +1 @@ +f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/old/commons-logging-LICENSE.txt b/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/commons-logging-LICENSE.txt rename to plugins/repository-gcs/licenses/commons-logging-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/old/commons-logging-NOTICE.txt b/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/commons-logging-NOTICE.txt rename to plugins/repository-gcs/licenses/commons-logging-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 deleted file mode 100644 index 594177047c140..0000000000000 --- a/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -36ab73c0b5d4a67447eb89a3174cc76ced150bd1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-1.30.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-1.30.0.jar.sha1 new file mode 100644 index 0000000000000..d6d2bb20ed840 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-1.30.0.jar.sha1 @@ -0,0 +1 @@ +58fa2feb11b092be0a6ebe705a28736f12374230 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 deleted file mode 100644 index c251ea1dd956c..0000000000000 --- a/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb4bafbfd45b9d24efbb6138a31e37918fac015f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.47.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.47.0.jar.sha1 new file mode 100644 index 0000000000000..fdc722d1520d6 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-0.47.0.jar.sha1 @@ -0,0 +1 @@ +d096f3142eb3adbf877588d1044895d148d9efcb \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-client-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-1.23.0.jar.sha1 deleted file mode 100644 index 0c35d8e08b91f..0000000000000 --- a/plugins/repository-gcs/licenses/google-api-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -522ea860eb48dee71dfe2c61a1fd09663539f556 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-client-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..27dafe58a0182 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-client-1.24.1.jar.sha1 @@ -0,0 +1 @@ +37de23fb9b8b077de4ecec3192d98e752b0e5d72 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-1.23.0.jar.sha1 deleted file mode 100644 index 9f6f77ada3a69..0000000000000 --- a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba4fb6c5dc8d5ad94dedd9927ceee10a31a59abd \ No newline at end of file diff --git 
a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev135-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev135-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..e3042ee6ea07e --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev135-1.24.1.jar.sha1 @@ -0,0 +1 @@ +28d3d391dfc7e7e7951760708ad2f48cecacf38f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.10.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.10.0.jar.sha1 new file mode 100644 index 0000000000000..c8258d69326b8 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.10.0.jar.sha1 @@ -0,0 +1 @@ +f981288bd84fe6d140ed70d1d8dbe994a64fa3cc \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 deleted file mode 100644 index 0922a53d2e356..0000000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -25e0f45f3b3d1b4fccc8944845e51a7a4f359652 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.10.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.10.0.jar.sha1 new file mode 100644 index 0000000000000..f55ef7c9c2150 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.10.0.jar.sha1 @@ -0,0 +1 @@ +c079a62086121973a23d90f54e2b8c13050fa39d \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 deleted file mode 100644 index 100a44c187218..0000000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c0fe3a39b0f28d59de1986b3c50f018cd7cb9ec2 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 deleted file mode 100644 index 071533f227839..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c0e88c78ce17c92d76bf46345faf3fa68833b216 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.40.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..7562ead12e9f9 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-1.40.0.jar.sha1 @@ -0,0 +1 @@ +4985701f989030e262cf8f4e38cc954115f5b082 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 deleted file mode 100644 index fed3fc257c32c..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7b4559a9513abd98da50958c56a10f8ae00cb0f7 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.40.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..2761bfdc745c6 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-http-1.40.0.jar.sha1 @@ -0,0 +1 @@ 
+67f5806beda32894f1e6c9527925b64199fd2e4f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 deleted file mode 100644 index f49152ea05646..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -226019ae816b42c59f1b06999aeeb73722b87200 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.40.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..33e83b73712f7 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-storage-1.40.0.jar.sha1 @@ -0,0 +1 @@ +fabefef46f07d1e334123f0de17702708b4dfbd1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.23.0.jar.sha1 deleted file mode 100644 index 5526275d5a15f..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e86c84ff3c98eca6423e97780325b299133d858 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..46b99f23e470a --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.24.1.jar.sha1 @@ -0,0 +1 @@ +396eac8d3fb1332675f82b208f48a469d64f3b4a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 deleted file mode 100644 index 823c3a85089a5..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0eda0d0f758c1cc525866e52e1226c4eb579d130 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..e39f63fe33ae3 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.24.1.jar.sha1 @@ -0,0 +1 @@ +8535031ae10bf6a196e68f25e10c0d6382699cb6 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 deleted file mode 100644 index 85ba0ab798d05..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a72ea3a197937ef63a893e73df312dac0d813663 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..f6b9694abaa6c --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson-1.24.1.jar.sha1 @@ -0,0 +1 @@ +02c88e77c14effdda76f02a0eac968de74e0bd4e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.23.0.jar.sha1 deleted file mode 100644 index 510856a517f04..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-fd6761f4046a8cb0455e6fa5f58e12b061e9826e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..634b7d9198c8e --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.24.1.jar.sha1 @@ -0,0 +1 @@ +2ad1dffd8a450055e68d8004fe003033b751d761 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.23.0.jar.sha1 deleted file mode 100644 index 036812b88b5e0..0000000000000 --- a/plugins/repository-gcs/licenses/google-oauth-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e57ea1e2220bda5a2bd24ff17860212861f3c5cf \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..2d89939674a51 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-oauth-client-1.24.1.jar.sha1 @@ -0,0 +1 @@ +7b0e0218b96808868c23a7d0b40566a713931d9f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-context-1.12.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-context-1.12.0.jar.sha1 new file mode 100644 index 0000000000000..57f37a81c960f --- /dev/null +++ b/plugins/repository-gcs/licenses/grpc-context-1.12.0.jar.sha1 @@ -0,0 +1 @@ +5b63a170b786051a42cce08118d5ea3c8f60f749 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 deleted file mode 100644 index 02bac0e492074..0000000000000 --- a/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -28b0836f48c9705abf73829bbc536dba29a1329a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gson-2.7.jar.sha1 b/plugins/repository-gcs/licenses/gson-2.7.jar.sha1 new file mode 100644 index 0000000000000..b3433f306eb3f --- /dev/null +++ b/plugins/repository-gcs/licenses/gson-2.7.jar.sha1 @@ -0,0 +1 @@ +751f548c85fa49f330cecbb1875893f971b33c4e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-LICENSE.txt b/plugins/repository-gcs/licenses/gson-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/jackson-core-asl-LICENSE.txt rename to plugins/repository-gcs/licenses/gson-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-NOTICE.txt b/plugins/repository-gcs/licenses/gson-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/jackson-core-asl-NOTICE.txt rename to plugins/repository-gcs/licenses/gson-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 b/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 new file mode 100644 index 0000000000000..6937112a09fb6 --- /dev/null +++ b/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 @@ -0,0 +1 @@ +733db77aa8d9b2d68015189df76ab06304406e50 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/old/httpclient-LICENSE.txt b/plugins/repository-gcs/licenses/httpclient-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/httpclient-LICENSE.txt rename to plugins/repository-gcs/licenses/httpclient-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/old/httpclient-NOTICE.txt 
b/plugins/repository-gcs/licenses/httpclient-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/httpclient-NOTICE.txt rename to plugins/repository-gcs/licenses/httpclient-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 b/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 new file mode 100644 index 0000000000000..581726601745b --- /dev/null +++ b/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 @@ -0,0 +1 @@ +e7501a1b34325abb00d17dde96150604a0658b54 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-LICENSE b/plugins/repository-gcs/licenses/jackson-LICENSE new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/plugins/repository-gcs/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/plugins/repository-gcs/licenses/jackson-NOTICE b/plugins/repository-gcs/licenses/jackson-NOTICE new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/plugins/repository-gcs/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-1.9.11.jar.sha1 b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.11.jar.sha1 new file mode 100644 index 0000000000000..ed70030899aa0 --- /dev/null +++ b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.11.jar.sha1 @@ -0,0 +1 @@ +e32303ef8bd18a5c9272780d49b81c95e05ddf43 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 deleted file mode 100644 index c5016bf828d60..0000000000000 --- a/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3c304d70f42f832e0a86d45bd437f692129299a4 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/old/google-LICENSE.txt b/plugins/repository-gcs/licenses/old/google-LICENSE.txt deleted file mode 100644 index 980a15ac24eeb..0000000000000 --- a/plugins/repository-gcs/licenses/old/google-LICENSE.txt +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/old/google-NOTICE.txt b/plugins/repository-gcs/licenses/old/google-NOTICE.txt deleted file mode 100644 index 8d1c8b69c3fce..0000000000000 --- a/plugins/repository-gcs/licenses/old/google-NOTICE.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/plugins/repository-gcs/licenses/old/httpcore-LICENSE.txt b/plugins/repository-gcs/licenses/old/httpcore-LICENSE.txt deleted file mode 100644 index 72819a9f06f2a..0000000000000 --- a/plugins/repository-gcs/licenses/old/httpcore-LICENSE.txt +++ /dev/null @@ -1,241 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - -========================================================================= - -This project contains annotations in the package org.apache.http.annotation -which are derived from JCIP-ANNOTATIONS -Copyright (c) 2005 Brian Goetz and Tim Peierls. -See http://www.jcip.net and the Creative Commons Attribution License -(http://creativecommons.org/licenses/by/2.5) -Full text: http://creativecommons.org/licenses/by/2.5/legalcode - -License - -THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. - -BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS. - -1. Definitions - - "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License. - "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License. - "Licensor" means the individual or entity that offers the Work under the terms of this License. - "Original Author" means the individual or entity who created the Work. - "Work" means the copyrightable work of authorship offered under the terms of this License. - "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. - -2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws. - -3. License Grant. 
Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: - - to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works; - to create and reproduce Derivative Works; - to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works; - to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works. - - For the avoidance of doubt, where the work is a musical composition: - Performance Royalties Under Blanket Licenses. Licensor waives the exclusive right to collect, whether individually or via a performance rights society (e.g. ASCAP, BMI, SESAC), royalties for the public performance or public digital performance (e.g. webcast) of the Work. - Mechanical Rights and Statutory Royalties. Licensor waives the exclusive right to collect, whether individually or via a music rights agency or designated agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions). - Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor waives the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions). - -The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved. - -4. Restrictions.The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: - - You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. 
If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any credit as required by clause 4(b), as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any credit as required by clause 4(b), as requested. - If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor designate another party or parties (e.g. a sponsor institute, publishing entity, journal) for attribution in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit. - -5. Representations, Warranties and Disclaimer - -UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. - -6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -7. Termination - - This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. - Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). 
Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. - -8. Miscellaneous - - Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. - Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. - If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. - No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. - This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You. diff --git a/plugins/repository-gcs/licenses/old/httpcore-NOTICE.txt b/plugins/repository-gcs/licenses/old/httpcore-NOTICE.txt deleted file mode 100644 index c0be50a505ec1..0000000000000 --- a/plugins/repository-gcs/licenses/old/httpcore-NOTICE.txt +++ /dev/null @@ -1,8 +0,0 @@ -Apache HttpComponents Core -Copyright 2005-2014 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - -This project contains annotations derived from JCIP-ANNOTATIONS -Copyright (c) 2005 Brian Goetz and Tim Peierls. 
See http://www.jcip.net diff --git a/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 deleted file mode 100644 index 61d8e3b148144..0000000000000 --- a/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -54689fbf750a7f26e34fa1f1f96b883c53f51486 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-api-0.15.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-api-0.15.0.jar.sha1 new file mode 100644 index 0000000000000..e200e2e24a7df --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-api-0.15.0.jar.sha1 @@ -0,0 +1 @@ +9a098392b287d7924660837f4eba0ce252013683 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 deleted file mode 100644 index c0b04f0f8ccce..0000000000000 --- a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -82e572b41e81ecf58d0d1e9a3953a05aa8f9c84b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.15.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.15.0.jar.sha1 new file mode 100644 index 0000000000000..b642e1ebebd59 --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.15.0.jar.sha1 @@ -0,0 +1 @@ +d88690591669d9b5ba6d91d9eac7736e58ccf3da \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-LICENSE.txt b/plugins/repository-gcs/licenses/proto-google-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/proto-google-common-protos-LICENSE.txt rename to plugins/repository-gcs/licenses/proto-google-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-NOTICE.txt b/plugins/repository-gcs/licenses/proto-google-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/proto-google-common-protos-NOTICE.txt rename to plugins/repository-gcs/licenses/proto-google-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-1.12.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-1.12.0.jar.sha1 new file mode 100644 index 0000000000000..47f3c178a68c6 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-1.12.0.jar.sha1 @@ -0,0 +1 @@ +1140cc74df039deb044ed0e320035e674dc13062 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 deleted file mode 100644 index 0a2dee4447e92..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b3282312ba82536fc9a7778cabfde149a875e877 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 new file mode 100644 index 0000000000000..2bfae3456d499 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 @@ -0,0 +1 @@ +ea312c0250a5d0a7cdd1b20bc2c3259938b79855 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-LICENSE.txt b/plugins/repository-gcs/licenses/protobuf-LICENSE.txt new file mode 100644 index 0000000000000..19b305b00060a 
--- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-LICENSE.txt @@ -0,0 +1,32 @@ +Copyright 2008 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. diff --git a/plugins/repository-gcs/licenses/protobuf-NOTICE.txt b/plugins/repository-gcs/licenses/protobuf-NOTICE.txt new file mode 100644 index 0000000000000..19b305b00060a --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-NOTICE.txt @@ -0,0 +1,32 @@ +Copyright 2008 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.6.0.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.6.0.jar.sha1 new file mode 100644 index 0000000000000..050ebd44c9282 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-3.6.0.jar.sha1 @@ -0,0 +1 @@ +5333f7e422744d76840c08a106e28e519fbe3acd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-util-3.6.0.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-util-3.6.0.jar.sha1 new file mode 100644 index 0000000000000..cc85974499a65 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-util-3.6.0.jar.sha1 @@ -0,0 +1 @@ +3680d0042d4fe0b95ada844ff24da0698a7f0773 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/threetenbp-1.3.3.jar.sha1 b/plugins/repository-gcs/licenses/threetenbp-1.3.3.jar.sha1 new file mode 100644 index 0000000000000..9273043e14520 --- /dev/null +++ b/plugins/repository-gcs/licenses/threetenbp-1.3.3.jar.sha1 @@ -0,0 +1 @@ +3ea31c96676ff12ab56be0b1af6fff61d1a4f1f2 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 b/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 deleted file mode 100644 index 65c16fed4a07b..0000000000000 --- a/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -89dcc04a7e028c3c963413a71f950703cf51f057 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 6debaf5282fc1..557dcaa5faedc 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -582,6 +582,25 @@ thirdPartyAudit.excludes = [ 'com.squareup.okhttp.ResponseBody' ] -if (JavaVersion.current() > JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion > JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += ['javax.xml.bind.annotation.adapters.HexBinaryAdapter'] } + +if (project.runtimeJavaVersion == JavaVersion.VERSION_1_8) { + thirdPartyAudit.excludes += [ + // TODO: Why is this needed ? 
+ 'com.sun.javadoc.AnnotationDesc', + 'com.sun.javadoc.AnnotationTypeDoc', + 'com.sun.javadoc.ClassDoc', + 'com.sun.javadoc.ConstructorDoc', + 'com.sun.javadoc.Doc', + 'com.sun.javadoc.DocErrorReporter', + 'com.sun.javadoc.FieldDoc', + 'com.sun.javadoc.LanguageVersion', + 'com.sun.javadoc.MethodDoc', + 'com.sun.javadoc.PackageDoc', + 'com.sun.javadoc.ProgramElementDoc', + 'com.sun.javadoc.RootDoc', + 'com.sun.tools.doclets.standard.Standard' + ] +} diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 7f0ca209db797..5d248b22caf16 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -447,7 +447,7 @@ thirdPartyAudit.excludes = [ ] // jarhell with jdk (intentionally, because jaxb was removed from default modules in java 9) -if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion <= JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += [ 'javax.xml.bind.Binder', 'javax.xml.bind.ContextFinder$1', diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 07605bfee29b3..cb8916b857c23 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -62,7 +62,6 @@ thirdPartyAudit.excludes = [ 'io.netty.internal.tcnative.SSLContext', // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) - 'org.bouncycastle.asn1.x500.X500Name', 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', 'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder', @@ -141,4 +140,11 @@ thirdPartyAudit.excludes = [ 'org.conscrypt.BufferAllocator', 'org.conscrypt.Conscrypt', 'org.conscrypt.HandshakeListener' -] \ No newline at end of file +] +if (project.inFipsJvm == false) { + // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in + // a FIPS JVM with BouncyCastleFIPS Provider + thirdPartyAudit.excludes += [ + 'org.bouncycastle.asn1.x500.X500Name' + ] +} \ No newline at end of file diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java index 3dcd59cf8e28c..17a5c1fb97e80 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java @@ -139,7 +139,7 @@ private void handleRequest(Object msg) { if (request.decoderResult().isFailure()) { Throwable cause = request.decoderResult().cause(); if (cause instanceof Error) { - ExceptionsHelper.dieOnError(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); transport.incomingRequestError(httpRequest, nioHttpChannel, new Exception(cause)); } else { transport.incomingRequestError(httpRequest, nioHttpChannel, (Exception) cause); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java index 41cb72aa32273..133206e1322d4 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java @@ -73,7 +73,7 @@ public void close() throws Exception { closeFuture.await(); if (closeFuture.isSuccess() == false) { Throwable cause = closeFuture.cause(); - ExceptionsHelper.dieOnError(cause); + 
ExceptionsHelper.maybeDieOnAnotherThread(cause); throw (Exception) cause; } } @@ -84,7 +84,7 @@ public void addCloseListener(BiConsumer listener) { listener.accept(null, null); } else { final Throwable cause = f.cause(); - ExceptionsHelper.dieOnError(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); assert cause instanceof Exception; listener.accept(null, (Exception) cause); } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java index 2cdaa4708d15a..637bbafff8eaf 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java @@ -223,7 +223,7 @@ public static NettyListener fromBiConsumer(BiConsumer biConsume biConsumer.accept(null, null); } else { if (cause instanceof Error) { - ExceptionsHelper.dieOnError(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); biConsumer.accept(null, new Exception(cause)); } else { biConsumer.accept(null, (Exception) cause); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 47229a0df2f6e..129f0ada77d5d 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -21,6 +21,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; @@ -82,7 +83,8 @@ protected NioTcpServerChannel bind(String name, InetSocketAddress address) throw } @Override - protected NioTcpChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + protected NioTcpChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException { + InetSocketAddress address = node.getAddress().address(); NioTcpChannel channel = nioGroup.openChannel(address, clientChannelFactory); channel.addConnectListener(ActionListener.toBiConsumer(connectListener)); return channel; diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java index 9322bfd71222a..baae00f81a3be 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -93,12 +92,6 @@ protected MockTransportService build(Settings settings, Version version, Cluster return transportService; } - @Override - protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws 
IOException { - TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection; - CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true); - } - public void testConnectException() throws UnknownHostException { try { serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), diff --git a/qa/ccs-unavailable-clusters/build.gradle b/qa/ccs-unavailable-clusters/build.gradle index d9de422bb43e1..c1f2bc9627108 100644 --- a/qa/ccs-unavailable-clusters/build.gradle +++ b/qa/ccs-unavailable-clusters/build.gradle @@ -21,5 +21,5 @@ apply plugin: 'elasticsearch.rest-test' apply plugin: 'elasticsearch.test-with-dependencies' dependencies { - testCompile project(path: ':client:rest-high-level', configuration: 'shadow') + testCompile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}" } diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 992d3ce71f623..9250122025c0a 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -90,14 +90,14 @@ public void testDieWithDignity() throws Exception { final Iterator it = lines.iterator(); - boolean fatalErrorOnTheNetworkLayer = false; + boolean fatalError = false; boolean fatalErrorInThreadExiting = false; - while (it.hasNext() && (fatalErrorOnTheNetworkLayer == false || fatalErrorInThreadExiting == false)) { + while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { final String line = it.next(); - if (line.contains("fatal error on the network layer")) { - fatalErrorOnTheNetworkLayer = true; - } else if (line.matches(".*\\[ERROR\\]\\[o.e.b.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]" + if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.ExceptionsHelper\\s*\\] \\[node-0\\] fatal error")) { + fatalError = true; + } else if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]" + " fatal error in thread \\[Thread-\\d+\\], exiting$")) { fatalErrorInThreadExiting = true; assertTrue(it.hasNext()); @@ -105,7 +105,7 @@ public void testDieWithDignity() throws Exception { } } - assertTrue(fatalErrorOnTheNetworkLayer); + assertTrue(fatalError); assertTrue(fatalErrorInThreadExiting); } diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 0b936e44e5beb..d7111f64a1baf 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -908,9 +908,6 @@ public void testHistoryUUIDIsAdded() throws Exception { private void checkSnapshot(String snapshotName, int count, Version tookOnVersion) throws IOException { // Check the snapshot metadata, especially the version Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName); - if (false == (runningAgainstOldCluster && oldClusterVersion.before(Version.V_5_5_0))) { - listSnapshotRequest.addParameter("verbose", "true"); - } Map listSnapshotResponse = 
entityAsMap(client().performRequest(listSnapshotRequest)); assertEquals(singletonList(snapshotName), XContentMapValues.extractValue("snapshots.snapshot", listSnapshotResponse)); assertEquals(singletonList("SUCCESS"), XContentMapValues.extractValue("snapshots.state", listSnapshotResponse)); diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index 4a0c91469629d..4c3b48cbac946 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -69,9 +67,7 @@ esvagrant { } forbiddenApisMain { - signaturesURLs = [ - PrecommitTasks.getResource('/forbidden/jdk-signatures.txt') - ] + replaceSignatureFiles 'jdk-signatures' } // we don't have additional tests for the tests themselves diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json index 090c429fd82c0..13281a2a232f4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json @@ -33,6 +33,11 @@ "type" : "number", "description" : "A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on it's rewrite method ie. if date filters are mandatory to match but the shard bounds and the query are disjoint.", "default" : 128 + }, + "max_concurrent_shard_requests" : { + "type" : "number", + "description" : "The number of concurrent shard requests each sub search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests", + "default" : "The default grows with the number of nodes in the cluster but is at most 256." } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json new file mode 100644 index 0000000000000..487beaba86520 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json @@ -0,0 +1,23 @@ +{ + "nodes.reload_secure_settings": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-reload-secure-settings.html", + "methods": ["POST"], + "url": { + "path": "/_nodes/reload_secure_settings", + "paths": ["/_nodes/reload_secure_settings", "/_nodes/{node_id}/reload_secure_settings"], + "parts": { + "node_id": { + "type": "list", + "description": "A comma-separated list of node IDs to span the reload/reinit call. Should stay empty because reloading usually involves all cluster nodes." 
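Editor's aside, not part of the patch: a minimal sketch of how the new `/_nodes/reload_secure_settings` endpoint declared in the spec above might be exercised from a Java REST test, reusing the low-level `Request`/`performRequest` pattern that already appears elsewhere in this diff. The test class and method names are made up, the base class `ESRestTestCase` is assumed, and the two response keys checked here are the same ones asserted by the YAML smoke test added further down in this patch.

    import java.util.Map;
    import org.elasticsearch.client.Request;
    import org.elasticsearch.test.rest.ESRestTestCase;

    public class ReloadSecureSettingsSketchIT extends ESRestTestCase {
        public void testReloadSecureSettingsRoundTrip() throws Exception {
            // POST without a {node_id} targets all nodes, as the spec above recommends.
            Request request = new Request("POST", "/_nodes/reload_secure_settings");
            Map<String, Object> response = entityAsMap(client().performRequest(request));
            // Same top-level keys as the nodes.reload_secure_settings smoke test below.
            assertTrue(response.containsKey("cluster_name"));
            assertTrue(response.containsKey("nodes"));
        }
    }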
+ } + }, + "params": { + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" + } + } + }, + "body": null + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml index 536e2bfaf9495..fb884ddfca2c8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml @@ -61,3 +61,35 @@ setup: - match: { responses.3.error.root_cause.0.reason: "/no.such.index/" } - match: { responses.3.error.root_cause.0.index: index_3 } - match: { responses.4.hits.total: 4 } + +--- +"Least impact smoke test": +# only passing these parameters to make sure they are consumed + - do: + max_concurrent_shard_requests: 1 + max_concurrent_searches: 1 + msearch: + body: + - index: index_* + - query: + match: {foo: foo} + - index: index_2 + - query: + match_all: {} + - index: index_1 + - query: + match: {foo: foo} + - index: index_3 + - query: + match_all: {} + - type: test + - query: + match_all: {} + + - match: { responses.0.hits.total: 2 } + - match: { responses.1.hits.total: 1 } + - match: { responses.2.hits.total: 1 } + - match: { responses.3.error.root_cause.0.type: index_not_found_exception } + - match: { responses.3.error.root_cause.0.reason: "/no.such.index/" } + - match: { responses.3.error.root_cause.0.index: index_3 } + - match: { responses.4.hits.total: 4 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.reload_secure_settings/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.reload_secure_settings/10_basic.yml new file mode 100644 index 0000000000000..0a4cf0d64a001 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.reload_secure_settings/10_basic.yml @@ -0,0 +1,8 @@ +--- +"node_reload_secure_settings test": + + - do: + nodes.reload_secure_settings: {} + + - is_true: nodes + - is_true: cluster_name diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml index 5ecc357e0e167..6ab18146bba68 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml @@ -233,3 +233,51 @@ query: match_all: {} size: 0 + +--- +"Scroll max_score is null": + - skip: + version: " - 6.99.99" + reason: max_score was set to 0 rather than null before 7.0 + + - do: + indices.create: + index: test_scroll + - do: + index: + index: test_scroll + type: test + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_scroll + type: test + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + search: + index: test_scroll + size: 1 + scroll: 1m + sort: foo + body: + query: + match_all: {} + + - set: {_scroll_id: scroll_id} + - length: {hits.hits: 1 } + - match: { hits.max_score: null } + + - do: + scroll: + scroll_id: $scroll_id + scroll: 1m + + - length: {hits.hits: 1 } + - match: { hits.max_score: null } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml index 521dc4c1cac8d..dad05cce4eb4a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -244,6 +244,23 @@ 
setup: - match: { hits.total: 6 } - length: { hits.hits: 0 } +--- +"no hits and inner_hits max_score null": + + - skip: + version: " - 6.99.99" + reason: max_score was set to 0 rather than null before 7.0 + + - do: + search: + index: test + body: + size: 0 + collapse: { field: numeric_group, inner_hits: { name: sub_hits, size: 1} } + sort: [{ sort: desc }] + + - match: { hits.max_score: null } + --- "field collapsing and multiple inner_hits": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml index dc6b130b28957..c63dee2e211f8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml @@ -128,7 +128,6 @@ setup: - match: { hits.total: 2 } - match: { aggregations.some_agg.doc_count: 3 } - - do: search: pre_filter_shard_size: 1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml index dfe0b6825cdc5..62770e2915d23 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml @@ -39,6 +39,7 @@ setup: df: text - match: {hits.total: 1} + - match: {hits.max_score: 1} - match: {hits.hits.0._score: 1} - do: @@ -52,6 +53,7 @@ setup: boost: 2 - match: {hits.total: 1} + - match: {hits.max_score: 2} - match: {hits.hits.0._score: 2} - do: @@ -61,6 +63,7 @@ setup: df: text - match: {hits.total: 1} + - match: {hits.max_score: 1} - match: {hits.hits.0._score: 1} --- diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml index 24920580c4552..4d7ee91bef5f3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml @@ -29,6 +29,7 @@ query_weight: 5 rescore_query_weight: 10 + - match: {hits.max_score: 15} - match: { hits.hits.0._score: 15 } - match: { hits.hits.0._explanation.value: 15 } diff --git a/server/build.gradle b/server/build.gradle index 1964eddd03e95..c01fb92b05080 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -58,13 +58,13 @@ if (!isEclipse && !isIdea) { sourceCompatibility = 9 targetCompatibility = 9 } - - /* Enable this when forbiddenapis was updated to 2.6. 
- * See: https://github.com/elastic/elasticsearch/issues/29292 + forbiddenApisJava9 { - targetCompatibility = 9 + if (project.runtimeJavaVersion < JavaVersion.VERSION_1_9) { + targetCompatibility = JavaVersion.VERSION_1_9 + javaHome = project.java9Home + } } - */ jar { metaInf { @@ -304,17 +304,7 @@ thirdPartyAudit.excludes = [ 'com.google.common.geometry.S2LatLng', ] -if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { - // Used by Log4J 2.11.1 - thirdPartyAudit.excludes += [ - 'java.io.ObjectInputFilter', - 'java.io.ObjectInputFilter$Config', - 'java.io.ObjectInputFilter$FilterInfo', - 'java.io.ObjectInputFilter$Status' - ] -} - -if (JavaVersion.current() > JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion > JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += ['javax.xml.bind.DatatypeConverter'] } @@ -341,4 +331,3 @@ if (isEclipse == false || project.path == ":server-tests") { check.dependsOn integTest integTest.mustRunAfter test } - diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 9a02b76b3e038..c009bb3818cc8 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -44,7 +44,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -137,17 +136,7 @@ public ElasticsearchException(StreamInput in) throws IOException { super(in.readOptionalString(), in.readException()); readStackTrace(this, in); headers.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString)); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - metadata.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString)); - } else { - for (Iterator>> iterator = headers.entrySet().iterator(); iterator.hasNext(); ) { - Map.Entry> header = iterator.next(); - if (header.getKey().startsWith("es.")) { - metadata.put(header.getKey(), header.getValue()); - iterator.remove(); - } - } - } + metadata.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString)); } /** @@ -287,15 +276,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(this.getMessage()); out.writeException(this.getCause()); writeStackTraces(this, out); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString); - out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString); - } else { - Map> finalHeaders = new HashMap<>(headers.size() + metadata.size()); - finalHeaders.putAll(headers); - finalHeaders.putAll(metadata); - out.writeMapOfLists(finalHeaders, StreamOutput::writeString, StreamOutput::writeString); - } + out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString); + out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString); } public static ElasticsearchException readException(StreamInput input, int id) throws IOException { @@ -1018,11 +1000,11 @@ private enum ElasticsearchExceptionHandle { STATUS_EXCEPTION(org.elasticsearch.ElasticsearchStatusException.class, org.elasticsearch.ElasticsearchStatusException::new, 145, UNKNOWN_VERSION_ADDED), TASK_CANCELLED_EXCEPTION(org.elasticsearch.tasks.TaskCancelledException.class, - org.elasticsearch.tasks.TaskCancelledException::new, 146, Version.V_5_1_1), 
+ org.elasticsearch.tasks.TaskCancelledException::new, 146, UNKNOWN_VERSION_ADDED), SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class, - org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2), + org.elasticsearch.env.ShardLockObtainFailedException::new, 147, UNKNOWN_VERSION_ADDED), UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.UnknownNamedObjectException.class, - org.elasticsearch.common.xcontent.UnknownNamedObjectException::new, 148, Version.V_5_2_0), + org.elasticsearch.common.xcontent.UnknownNamedObjectException::new, 148, UNKNOWN_VERSION_ADDED), TOO_MANY_BUCKETS_EXCEPTION(MultiBucketConsumerService.TooManyBucketsException.class, MultiBucketConsumerService.TooManyBucketsException::new, 149, Version.V_7_0_0_alpha1); diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index 59eb8b60dadba..9f7566662174a 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -136,42 +136,6 @@ public static String formatStackTrace(final StackTraceElement[] stackTrace) { return Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + e).collect(Collectors.joining("\n")); } - static final int MAX_ITERATIONS = 1024; - - /** - * Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable. - * - * @param cause the root throwable - * - * @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable - */ - public static Optional maybeError(final Throwable cause, final Logger logger) { - // early terminate if the cause is already an error - if (cause instanceof Error) { - return Optional.of((Error) cause); - } - - final Queue queue = new LinkedList<>(); - queue.add(cause); - int iterations = 0; - while (!queue.isEmpty()) { - iterations++; - if (iterations > MAX_ITERATIONS) { - logger.warn("giving up looking for fatal errors", cause); - break; - } - final Throwable current = queue.remove(); - if (current instanceof Error) { - return Optional.of((Error) current); - } - Collections.addAll(queue, current.getSuppressed()); - if (current.getCause() != null) { - queue.add(current.getCause()); - } - } - return Optional.empty(); - } - /** * Rethrows the first exception in the list and adds all remaining to the suppressed list. * If the given list is empty no exception is thrown @@ -243,20 +207,57 @@ public static boolean reThrowIfNotNull(@Nullable Throwable e) { return true; } + static final int MAX_ITERATIONS = 1024; + + /** + * Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable. 
+ * + * @param cause the root throwable + * @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable + */ + public static Optional maybeError(final Throwable cause, final Logger logger) { + // early terminate if the cause is already an error + if (cause instanceof Error) { + return Optional.of((Error) cause); + } + + final Queue queue = new LinkedList<>(); + queue.add(cause); + int iterations = 0; + while (queue.isEmpty() == false) { + iterations++; + // this is a guard against deeply nested or circular chains of exceptions + if (iterations > MAX_ITERATIONS) { + logger.warn("giving up looking for fatal errors", cause); + break; + } + final Throwable current = queue.remove(); + if (current instanceof Error) { + return Optional.of((Error) current); + } + Collections.addAll(queue, current.getSuppressed()); + if (current.getCause() != null) { + queue.add(current.getCause()); + } + } + return Optional.empty(); + } + /** * If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be - * caught and bubbles up to the uncaught exception handler. + * caught and bubbles up to the uncaught exception handler. Note that the cause tree is examined for any {@link Error}. See + * {@link #maybeError(Throwable, Logger)} for the semantics. * - * @param throwable the throwable to test + * @param throwable the throwable to possibly throw on another thread */ - public static void dieOnError(Throwable throwable) { - final Optional maybeError = ExceptionsHelper.maybeError(throwable, logger); - if (maybeError.isPresent()) { + public static void maybeDieOnAnotherThread(final Throwable throwable) { + ExceptionsHelper.maybeError(throwable, logger).ifPresent(error -> { /* - * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many - * invocations of user-code in try/catch blocks that swallow all throwables. This means that a rethrow here will not bubble up - * to where we want it to. So, we fork a thread and throw the exception from there where Netty can not get to it. We do not wrap - * the exception so as to not lose the original cause during exit. + * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, sometimes the stack + * contains statements that catch any throwable (e.g., Netty, and the JDK futures framework). This means that a rethrow here + * will not bubble up to where we want it to. So, we fork a thread and throw the exception from there where we are sure the + * stack does not contain statements that catch any throwable. We do not wrap the exception so as to not lose the original cause + * during exit. 
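Editor's aside, not part of the patch: a minimal sketch of the semantics documented in the comment above, assuming `maybeError` and `maybeDieOnAnotherThread` keep the signatures shown in this hunk. The class and logger names are illustrative only.

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.ExceptionsHelper;

    class FatalErrorHandlingSketch {
        private static final Logger LOGGER = LogManager.getLogger(FatalErrorHandlingSketch.class);

        static void demo() {
            // An Error buried in the cause/suppressed tree of an ordinary exception...
            Exception failure = new RuntimeException("request failed");
            failure.addSuppressed(new IllegalStateException("cleanup failed", new OutOfMemoryError("boom")));
            // ...is still found by the bounded breadth-first walk described above.
            assert ExceptionsHelper.maybeError(failure, LOGGER).isPresent();
            // Rethrowing from a fresh thread lets the Error reach the JVM's uncaught
            // exception handler even if every frame on this stack catches Throwable.
            ExceptionsHelper.maybeDieOnAnotherThread(failure);
        }
    }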
*/ try { // try to log the current stack trace @@ -264,12 +265,12 @@ public static void dieOnError(Throwable throwable) { logger.error("fatal error\n{}", formatted); } finally { new Thread( - () -> { - throw maybeError.get(); - }) - .start(); + () -> { + throw error; + }) + .start(); } - } + }); } /** diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index a815a9711d023..7303e8d34c907 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -43,85 +43,6 @@ public class Version implements Comparable, ToXContentFragment { * values below 25 are for alpha builder (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99 * indicating a release the (internal) format of the id is there so we can easily do after/before checks on the id */ - public static final int V_5_0_0_alpha1_ID = 5000001; - public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); - public static final int V_5_0_0_alpha2_ID = 5000002; - public static final Version V_5_0_0_alpha2 = new Version(V_5_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); - public static final int V_5_0_0_alpha3_ID = 5000003; - public static final Version V_5_0_0_alpha3 = new Version(V_5_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); - public static final int V_5_0_0_alpha4_ID = 5000004; - public static final Version V_5_0_0_alpha4 = new Version(V_5_0_0_alpha4_ID, org.apache.lucene.util.Version.LUCENE_6_1_0); - public static final int V_5_0_0_alpha5_ID = 5000005; - public static final Version V_5_0_0_alpha5 = new Version(V_5_0_0_alpha5_ID, org.apache.lucene.util.Version.LUCENE_6_1_0); - public static final int V_5_0_0_beta1_ID = 5000026; - public static final Version V_5_0_0_beta1 = new Version(V_5_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); - public static final int V_5_0_0_rc1_ID = 5000051; - public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); - public static final int V_5_0_0_ID = 5000099; - public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); - public static final int V_5_0_1_ID = 5000199; - public static final Version V_5_0_1 = new Version(V_5_0_1_ID, org.apache.lucene.util.Version.LUCENE_6_2_1); - public static final int V_5_0_2_ID = 5000299; - public static final Version V_5_0_2 = new Version(V_5_0_2_ID, org.apache.lucene.util.Version.LUCENE_6_2_1); - // no version constant for 5.1.0 due to inadvertent release - public static final int V_5_1_1_ID = 5010199; - public static final Version V_5_1_1 = new Version(V_5_1_1_ID, org.apache.lucene.util.Version.LUCENE_6_3_0); - public static final int V_5_1_2_ID = 5010299; - public static final Version V_5_1_2 = new Version(V_5_1_2_ID, org.apache.lucene.util.Version.LUCENE_6_3_0); - public static final int V_5_2_0_ID = 5020099; - public static final Version V_5_2_0 = new Version(V_5_2_0_ID, org.apache.lucene.util.Version.LUCENE_6_4_0); - public static final int V_5_2_1_ID = 5020199; - public static final Version V_5_2_1 = new Version(V_5_2_1_ID, org.apache.lucene.util.Version.LUCENE_6_4_1); - public static final int V_5_2_2_ID = 5020299; - public static final Version V_5_2_2 = new Version(V_5_2_2_ID, org.apache.lucene.util.Version.LUCENE_6_4_1); - public static final int V_5_3_0_ID = 5030099; - public static final 
Version V_5_3_0 = new Version(V_5_3_0_ID, org.apache.lucene.util.Version.LUCENE_6_4_1); - public static final int V_5_3_1_ID = 5030199; - public static final Version V_5_3_1 = new Version(V_5_3_1_ID, org.apache.lucene.util.Version.LUCENE_6_4_2); - public static final int V_5_3_2_ID = 5030299; - public static final Version V_5_3_2 = new Version(V_5_3_2_ID, org.apache.lucene.util.Version.LUCENE_6_4_2); - public static final int V_5_3_3_ID = 5030399; - public static final Version V_5_3_3 = new Version(V_5_3_3_ID, org.apache.lucene.util.Version.LUCENE_6_4_2); - public static final int V_5_4_0_ID = 5040099; - public static final Version V_5_4_0 = new Version(V_5_4_0_ID, org.apache.lucene.util.Version.LUCENE_6_5_0); - public static final int V_5_4_1_ID = 5040199; - public static final Version V_5_4_1 = new Version(V_5_4_1_ID, org.apache.lucene.util.Version.LUCENE_6_5_1); - public static final int V_5_4_2_ID = 5040299; - public static final Version V_5_4_2 = new Version(V_5_4_2_ID, org.apache.lucene.util.Version.LUCENE_6_5_1); - public static final int V_5_4_3_ID = 5040399; - public static final Version V_5_4_3 = new Version(V_5_4_3_ID, org.apache.lucene.util.Version.LUCENE_6_5_1); - public static final int V_5_5_0_ID = 5050099; - public static final Version V_5_5_0 = new Version(V_5_5_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_5_1_ID = 5050199; - public static final Version V_5_5_1 = new Version(V_5_5_1_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_5_2_ID = 5050299; - public static final Version V_5_5_2 = new Version(V_5_5_2_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_5_3_ID = 5050399; - public static final Version V_5_5_3 = new Version(V_5_5_3_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_6_0_ID = 5060099; - public static final Version V_5_6_0 = new Version(V_5_6_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_6_1_ID = 5060199; - public static final Version V_5_6_1 = new Version(V_5_6_1_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_2_ID = 5060299; - public static final Version V_5_6_2 = new Version(V_5_6_2_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_3_ID = 5060399; - public static final Version V_5_6_3 = new Version(V_5_6_3_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_4_ID = 5060499; - public static final Version V_5_6_4 = new Version(V_5_6_4_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_5_ID = 5060599; - public static final Version V_5_6_5 = new Version(V_5_6_5_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_6_ID = 5060699; - public static final Version V_5_6_6 = new Version(V_5_6_6_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_7_ID = 5060799; - public static final Version V_5_6_7 = new Version(V_5_6_7_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_8_ID = 5060899; - public static final Version V_5_6_8 = new Version(V_5_6_8_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_9_ID = 5060999; - public static final Version V_5_6_9 = new Version(V_5_6_9_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_10_ID = 5061099; - public static final Version V_5_6_10 = new Version(V_5_6_10_ID, 
org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_11_ID = 5061199; - public static final Version V_5_6_11 = new Version(V_5_6_11_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); @@ -174,10 +95,10 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_3_1 = new Version(V_6_3_1_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_3_2_ID = 6030299; public static final Version V_6_3_2 = new Version(V_6_3_2_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); - public static final int V_6_3_3_ID = 6030399; - public static final Version V_6_3_3 = new Version(V_6_3_3_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_4_0_ID = 6040099; public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); + public static final int V_6_4_1_ID = 6040199; + public static final Version V_6_4_1 = new Version(V_6_4_1_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_5_0_ID = 6050099; public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_7_0_0_alpha1_ID = 7000001; @@ -200,10 +121,10 @@ public static Version fromId(int id) { return V_7_0_0_alpha1; case V_6_5_0_ID: return V_6_5_0; + case V_6_4_1_ID: + return V_6_4_1; case V_6_4_0_ID: return V_6_4_0; - case V_6_3_3_ID: - return V_6_3_3; case V_6_3_2_ID: return V_6_3_2; case V_6_3_1_ID: @@ -246,84 +167,6 @@ public static Version fromId(int id) { return V_6_0_0_alpha2; case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; - case V_5_6_11_ID: - return V_5_6_11; - case V_5_6_10_ID: - return V_5_6_10; - case V_5_6_9_ID: - return V_5_6_9; - case V_5_6_8_ID: - return V_5_6_8; - case V_5_6_7_ID: - return V_5_6_7; - case V_5_6_6_ID: - return V_5_6_6; - case V_5_6_5_ID: - return V_5_6_5; - case V_5_6_4_ID: - return V_5_6_4; - case V_5_6_3_ID: - return V_5_6_3; - case V_5_6_2_ID: - return V_5_6_2; - case V_5_6_1_ID: - return V_5_6_1; - case V_5_6_0_ID: - return V_5_6_0; - case V_5_5_3_ID: - return V_5_5_3; - case V_5_5_2_ID: - return V_5_5_2; - case V_5_5_1_ID: - return V_5_5_1; - case V_5_5_0_ID: - return V_5_5_0; - case V_5_4_3_ID: - return V_5_4_3; - case V_5_4_2_ID: - return V_5_4_2; - case V_5_4_1_ID: - return V_5_4_1; - case V_5_4_0_ID: - return V_5_4_0; - case V_5_3_3_ID: - return V_5_3_3; - case V_5_3_2_ID: - return V_5_3_2; - case V_5_3_1_ID: - return V_5_3_1; - case V_5_3_0_ID: - return V_5_3_0; - case V_5_2_2_ID: - return V_5_2_2; - case V_5_2_1_ID: - return V_5_2_1; - case V_5_2_0_ID: - return V_5_2_0; - case V_5_1_2_ID: - return V_5_1_2; - case V_5_1_1_ID: - return V_5_1_1; - case V_5_0_2_ID: - return V_5_0_2; - case V_5_0_1_ID: - return V_5_0_1; - case V_5_0_0_ID: - return V_5_0_0; - case V_5_0_0_rc1_ID: - return V_5_0_0_rc1; - case V_5_0_0_beta1_ID: - return V_5_0_0_beta1; - case V_5_0_0_alpha5_ID: - return V_5_0_0_alpha5; - case V_5_0_0_alpha4_ID: - return V_5_0_0_alpha4; - case V_5_0_0_alpha3_ID: - return V_5_0_0_alpha3; - case V_5_0_0_alpha2_ID: - return V_5_0_0_alpha2; - case V_5_0_0_alpha1_ID: - return V_5_0_0_alpha1; default: return new Version(id, org.apache.lucene.util.Version.LATEST); } @@ -473,8 +316,11 @@ private static class DeclaredVersionsHolder { * is a beta or RC release then the version itself is returned. 
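// Illustrative sketch, not part of this change: how the internal version ids used in this file decode,
// shown with a hypothetical VersionIdDemo class. The id 5060099 hard-coded just below by
// minimumCompatibilityVersion() for major version 6 decodes to 5.6.0 with build suffix 99 (a release);
// the new V_6_4_1_ID = 6040199 decodes to 6.4.1 the same way.
final class VersionIdDemo {
    public static void main(String[] args) {
        final int id = 5060099;
        final int major = id / 1000000;
        final int minor = (id / 10000) % 100;
        final int revision = (id / 100) % 100;
        final int build = id % 100; // 99 marks a release; lower values mark alpha, beta, and RC builds
        System.out.printf("%d.%d.%d (build %d)%n", major, minor, revision, build); // prints 5.6.0 (build 99)
    }
}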
*/ public Version minimumCompatibilityVersion() { - if (major >= 6) { - // all major versions from 6 onwards are compatible with last minor series of the previous major + if (major == 6) { + // force the minimum compatibility for version 6 to 5.6 since we don't reference version 5 anymore + return Version.fromId(5060099); + } else if (major >= 7) { + // all major versions from 7 onwards are compatible with last minor series of the previous major Version bwcVersion = null; for (int i = DeclaredVersionsHolder.DECLARED_VERSIONS.size() - 1; i >= 0; i--) { diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 58efce77c9fd8..b3ec72d527099 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -131,9 +131,7 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shrink.ResizeAction; -import org.elasticsearch.action.admin.indices.shrink.ShrinkAction; import org.elasticsearch.action.admin.indices.shrink.TransportResizeAction; -import org.elasticsearch.action.admin.indices.shrink.TransportShrinkAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; @@ -446,7 +444,6 @@ public void reg actions.register(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); actions.register(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class); actions.register(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class); - actions.register(ShrinkAction.INSTANCE, TransportShrinkAction.class); actions.register(ResizeAction.INSTANCE, TransportResizeAction.class); actions.register(RolloverAction.INSTANCE, TransportRolloverAction.class); actions.register(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java index 08a97d4d993a6..490a1760abeaa 100644 --- a/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java +++ b/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java @@ -33,7 +33,7 @@ public abstract class ShardOperationFailedException implements Streamable, ToXContent { protected String index; - protected int shardId; + protected int shardId = -1; protected String reason; protected RestStatus status; protected Throwable cause; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index 40960c3362086..b6959afba5d89 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.allocation; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import 
org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.Nullable; @@ -69,7 +68,6 @@ public ClusterAllocationExplainRequest() { public ClusterAllocationExplainRequest(StreamInput in) throws IOException { super(in); - checkVersion(in.getVersion()); this.index = in.readOptionalString(); this.shard = in.readOptionalVInt(); this.primary = in.readOptionalBoolean(); @@ -94,7 +92,6 @@ public ClusterAllocationExplainRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - checkVersion(out.getVersion()); super.writeTo(out); out.writeOptionalString(index); out.writeOptionalVInt(shard); @@ -251,11 +248,4 @@ public static ClusterAllocationExplainRequest parse(XContentParser parser) throw public void readFrom(StreamInput in) throws IOException { throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } - - private void checkVersion(Version version) { - if (version.before(Version.V_5_2_0)) { - throw new IllegalArgumentException("cannot explain shards in a mixed-cluster with pre-" + Version.V_5_2_0 + - " nodes, node version [" + version + "]"); - } - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index 50df7b1bb26e0..fb3e6ac71adf3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -19,142 +19,22 @@ package org.elasticsearch.action.admin.cluster.node.reload; - -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.nodes.BaseNodesRequest; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.SecureString; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.CharBuffer; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; - -import static org.elasticsearch.action.ValidateActions.addValidationError; /** - * Request for a reload secure settings action + * Request for a reload secure settings action. */ public class NodesReloadSecureSettingsRequest extends BaseNodesRequest { - /** - * The password which is broadcasted to all nodes, but is never stored on - * persistent storage. The password is used to reread and decrypt the contents - * of the node's keystore (backing the implementation of - * {@code SecureSettings}). - */ - private SecureString secureSettingsPassword; - public NodesReloadSecureSettingsRequest() { } /** - * Reload secure settings only on certain nodes, based on the nodes ids - * specified. If none are passed, secure settings will be reloaded on all the - * nodes. + * Reload secure settings only on certain nodes, based on the nodes IDs specified. If none are passed, secure settings will be reloaded + * on all the nodes. */ - public NodesReloadSecureSettingsRequest(String... nodesIds) { + public NodesReloadSecureSettingsRequest(final String... 
nodesIds) { super(nodesIds); } - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (secureSettingsPassword == null) { - validationException = addValidationError("secure settings password cannot be null (use empty string instead)", - validationException); - } - return validationException; - } - - public SecureString secureSettingsPassword() { - return secureSettingsPassword; - } - - public NodesReloadSecureSettingsRequest secureStorePassword(SecureString secureStorePassword) { - this.secureSettingsPassword = secureStorePassword; - return this; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - final byte[] passwordBytes = in.readByteArray(); - try { - this.secureSettingsPassword = new SecureString(utf8BytesToChars(passwordBytes)); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - final byte[] passwordBytes = charsToUtf8Bytes(this.secureSettingsPassword.getChars()); - try { - out.writeByteArray(passwordBytes); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } - } - - /** - * Encodes the provided char[] to a UTF-8 byte[]. This is done while avoiding - * conversions to String. The provided char[] is not modified by this method, so - * the caller needs to take care of clearing the value if it is sensitive. - */ - private static byte[] charsToUtf8Bytes(char[] chars) { - final CharBuffer charBuffer = CharBuffer.wrap(chars); - final ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(charBuffer); - final byte[] bytes; - if (byteBuffer.hasArray()) { - // there is no guarantee that the byte buffers backing array is the right size - // so we need to make a copy - bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit()); - Arrays.fill(byteBuffer.array(), (byte) 0); // clear sensitive data - } else { - final int length = byteBuffer.limit() - byteBuffer.position(); - bytes = new byte[length]; - byteBuffer.get(bytes); - // if the buffer is not read only we can reset and fill with 0's - if (byteBuffer.isReadOnly() == false) { - byteBuffer.clear(); // reset - for (int i = 0; i < byteBuffer.limit(); i++) { - byteBuffer.put((byte) 0); - } - } - } - return bytes; - } - - /** - * Decodes the provided byte[] to a UTF-8 char[]. This is done while avoiding - * conversions to String. The provided byte[] is not modified by this method, so - * the caller needs to take care of clearing the value if it is sensitive. 
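// Illustrative sketch, not part of this change: the round trip performed by the removed
// charsToUtf8Bytes / utf8BytesToChars helpers in this hunk, i.e. encoding a password char[] to UTF-8
// bytes and back without ever materialising a String, and zeroing the intermediate copies so the secret
// does not linger on the heap. Hypothetical SecureBytesDemo class; it assumes the heap-backed buffers
// that Charset.encode/decode return in practice, whereas the removed helpers also guard the
// non-array-backed case via hasArray().
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

final class SecureBytesDemo {
    public static void main(String[] args) {
        final char[] password = {'s', '3', 'c', 'r', '3', 't'};

        // char[] -> UTF-8 byte[]: copy out of the buffer, then clear the buffer's backing array
        final ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(CharBuffer.wrap(password));
        final byte[] bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit());
        Arrays.fill(byteBuffer.array(), (byte) 0);

        // byte[] -> char[] again, clearing each temporary as soon as it has been copied
        final CharBuffer charBuffer = StandardCharsets.UTF_8.decode(ByteBuffer.wrap(bytes));
        final char[] roundTripped = Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit());
        Arrays.fill(charBuffer.array(), (char) 0);
        Arrays.fill(bytes, (byte) 0);

        System.out.println(Arrays.equals(password, roundTripped)); // prints true
    }
}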
- */ - public static char[] utf8BytesToChars(byte[] utf8Bytes) { - final ByteBuffer byteBuffer = ByteBuffer.wrap(utf8Bytes); - final CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer); - final char[] chars; - if (charBuffer.hasArray()) { - // there is no guarantee that the char buffers backing array is the right size - // so we need to make a copy - chars = Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit()); - Arrays.fill(charBuffer.array(), (char) 0); // clear sensitive data - } else { - final int length = charBuffer.limit() - charBuffer.position(); - chars = new char[length]; - charBuffer.get(chars); - // if the buffer is not read only we can reset and fill with 0's - if (charBuffer.isReadOnly() == false) { - charBuffer.clear(); // reset - for (int i = 0; i < charBuffer.limit(); i++) { - charBuffer.put((char) 0); - } - } - } - return chars; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java index b5f2f73e56f51..c8250455e6ba3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java @@ -19,19 +19,8 @@ package org.elasticsearch.action.admin.cluster.node.reload; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; - -import java.io.IOException; -import java.io.InputStream; -import java.util.Objects; /** * Builder for the reload secure settings nodes request @@ -39,46 +28,8 @@ public class NodesReloadSecureSettingsRequestBuilder extends NodesOperationRequestBuilder { - public static final String SECURE_SETTINGS_PASSWORD_FIELD_NAME = "secure_settings_password"; - public NodesReloadSecureSettingsRequestBuilder(ElasticsearchClient client, NodesReloadSecureSettingsAction action) { super(client, action, new NodesReloadSecureSettingsRequest()); } - public NodesReloadSecureSettingsRequestBuilder setSecureStorePassword(SecureString secureStorePassword) { - request.secureStorePassword(secureStorePassword); - return this; - } - - public NodesReloadSecureSettingsRequestBuilder source(BytesReference source, XContentType xContentType) throws IOException { - Objects.requireNonNull(xContentType); - // EMPTY is ok here because we never call namedObject - try (InputStream stream = source.streamInput(); - XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, stream)) { - XContentParser.Token token; - token = parser.nextToken(); - if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchParseException("expected an object, but found token [{}]", token); - } - token = parser.nextToken(); - if (token != XContentParser.Token.FIELD_NAME || false == 
SECURE_SETTINGS_PASSWORD_FIELD_NAME.equals(parser.currentName())) { - throw new ElasticsearchParseException("expected a field named [{}], but found [{}]", SECURE_SETTINGS_PASSWORD_FIELD_NAME, - token); - } - token = parser.nextToken(); - if (token != XContentParser.Token.VALUE_STRING) { - throw new ElasticsearchParseException("expected field [{}] to be of type string, but found [{}] instead", - SECURE_SETTINGS_PASSWORD_FIELD_NAME, token); - } - final String password = parser.text(); - setSecureStorePassword(new SecureString(password.toCharArray())); - token = parser.nextToken(); - if (token != XContentParser.Token.END_OBJECT) { - throw new ElasticsearchParseException("expected end of object, but found token [{}]", token); - } - } - return this; - } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index 0f44170fa603b..b8a36bac68d61 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.KeyStoreWrapper; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.PluginsService; @@ -82,16 +81,13 @@ protected NodesReloadSecureSettingsResponse.NodeResponse newNodeResponse() { @Override protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest) { - final NodesReloadSecureSettingsRequest request = nodeReloadRequest.request; - final SecureString secureSettingsPassword = request.secureSettingsPassword(); try (KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configFile())) { // reread keystore from config file if (keystore == null) { return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), new IllegalStateException("Keystore is missing")); } - // decrypt the keystore using the password from the request - keystore.decrypt(secureSettingsPassword.getChars()); + keystore.decrypt(new char[0]); // add the keystore to the original node settings object final Settings settingsWithKeystore = Settings.builder() .put(environment.settings(), false) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index 3ae5c2d683a27..4798aeb67c199 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.shards; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -59,10 +58,6 @@ public ClusterSearchShardsRequest(StreamInput in) throws IOException { routing = in.readOptionalString(); preference = in.readOptionalString(); - if 
(in.getVersion().onOrBefore(Version.V_5_1_1)) { - //types - in.readStringArray(); - } indicesOptions = IndicesOptions.readIndicesOptions(in); } @@ -78,10 +73,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(routing); out.writeOptionalString(preference); - if (out.getVersion().onOrBefore(Version.V_5_1_1)) { - //types - out.writeStringArray(Strings.EMPTY_ARRAY); - } indicesOptions.writeIndicesOptions(out); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java index 28c7903efde81..f8d448d0fe11c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.shards; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; @@ -77,14 +76,12 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < nodes.length; i++) { nodes[i] = new DiscoveryNode(in); } - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - int size = in.readVInt(); - indicesAndFilters = new HashMap<>(); - for (int i = 0; i < size; i++) { - String index = in.readString(); - AliasFilter aliasFilter = new AliasFilter(in); - indicesAndFilters.put(index, aliasFilter); - } + int size = in.readVInt(); + indicesAndFilters = new HashMap<>(); + for (int i = 0; i < size; i++) { + String index = in.readString(); + AliasFilter aliasFilter = new AliasFilter(in); + indicesAndFilters.put(index, aliasFilter); } } @@ -99,12 +96,10 @@ public void writeTo(StreamOutput out) throws IOException { for (DiscoveryNode node : nodes) { node.writeTo(out); } - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeVInt(indicesAndFilters.size()); - for (Map.Entry entry : indicesAndFilters.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } + out.writeVInt(indicesAndFilters.size()); + for (Map.Entry entry : indicesAndFilters.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index b3b24b570eeda..41ae57031d320 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -28,7 +28,6 @@ import java.io.IOException; import static org.elasticsearch.action.ValidateActions.addValidationError; -import static org.elasticsearch.snapshots.SnapshotInfo.VERBOSE_INTRODUCED; /** * Get snapshot request @@ -75,9 +74,7 @@ public GetSnapshotsRequest(StreamInput in) throws IOException { repository = in.readString(); snapshots = in.readStringArray(); ignoreUnavailable = in.readBoolean(); - if (in.getVersion().onOrAfter(VERBOSE_INTRODUCED)) { - verbose = in.readBoolean(); - } + verbose = in.readBoolean(); } @Override @@ -86,9 +83,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(repository); out.writeStringArray(snapshots); 
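// Illustrative sketch, not part of this change: the version-gated serialization pattern that these
// hunks delete, reduced to plain DataInput/DataOutput with an explicit peer wire version standing in
// for StreamInput/StreamOutput.getVersion(), and a generic "verbose" boolean standing in for the
// individual fields (the real gate constants differ per field). Once every supported peer is at or
// above the gate, the conditionals are dead code and can be removed, which is what this diff does
// after dropping the 5.x version constants. Hypothetical VersionGateDemo class.
import java.io.*;

final class VersionGateDemo {
    static final int V_5_1_1 = 5010199; // same id scheme as Version.V_5_1_1_ID

    static void writeVerbose(DataOutput out, int peerWireVersion, boolean verbose) throws IOException {
        if (peerWireVersion >= V_5_1_1) { // older peers do not know about the field, so skip it
            out.writeBoolean(verbose);
        }
    }

    static boolean readVerbose(DataInput in, int peerWireVersion) throws IOException {
        if (peerWireVersion >= V_5_1_1) {
            return in.readBoolean();
        }
        return true; // sensible default when talking to an older peer
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeVerbose(new DataOutputStream(bytes), 6040199, true);
        boolean verbose = readVerbose(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), 6040199);
        System.out.println(verbose); // prints true
    }
}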
out.writeBoolean(ignoreUnavailable); - if (out.getVersion().onOrAfter(VERBOSE_INTRODUCED)) { - out.writeBoolean(verbose); - } + out.writeBoolean(verbose); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index 6f702cbbe7c0a..d02d6272c9514 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -121,11 +121,7 @@ public void readFrom(StreamInput in) throws IOException { } id = in.readOptionalString(); content = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - xContentType = in.readEnum(XContentType.class); - } else { - xContentType = XContentHelper.xContentType(content); - } + xContentType = in.readEnum(XContentType.class); if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) { context = in.readOptionalString(); source = new StoredScriptSource(in); @@ -143,9 +139,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalString(id); out.writeBytesReference(content); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) { out.writeOptionalString(context); source.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java index d45ab2682a5ec..e571db951cbc1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.action.admin.indices.analyze; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -191,15 +190,10 @@ public void readFrom(StreamInput in) throws IOException { startOffset = in.readInt(); endOffset = in.readInt(); position = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - Integer len = in.readOptionalVInt(); - if (len != null) { - positionLength = len; - } else { - positionLength = 1; - } - } - else { + Integer len = in.readOptionalVInt(); + if (len != null) { + positionLength = len; + } else { positionLength = 1; } type = in.readOptionalString(); @@ -212,9 +206,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(startOffset); out.writeInt(endOffset); out.writeVInt(position); - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeOptionalVInt(positionLength > 1 ? positionLength : null); - } + out.writeOptionalVInt(positionLength > 1 ? 
positionLength : null); out.writeOptionalString(type); out.writeMapWithConsistentOrder(attributes); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index f2e07e29bad6e..25f7f33647c25 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -55,8 +55,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ private final Set aliases = new HashSet<>(); - private final Map customs = new HashMap<>(); - private final Set blocks = new HashSet<>(); private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; @@ -83,11 +81,6 @@ public CreateIndexClusterStateUpdateRequest aliases(Set aliases) { return this; } - public CreateIndexClusterStateUpdateRequest customs(Map customs) { - this.customs.putAll(customs); - return this; - } - public CreateIndexClusterStateUpdateRequest blocks(Set blocks) { this.blocks.addAll(blocks); return this; @@ -146,10 +139,6 @@ public Set aliases() { return aliases; } - public Map customs() { - return customs; - } - public Set blocks() { return blocks; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 875d17eb54bc8..fa2a395f2c9e4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -58,9 +57,9 @@ import java.util.Set; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; /** * A request to create an index. Best created with {@link org.elasticsearch.client.Requests#createIndexRequest(String)}. @@ -87,8 +86,6 @@ public class CreateIndexRequest extends AcknowledgedRequest private final Set aliases = new HashSet<>(); - private final Map customs = new HashMap<>(); - private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; public CreateIndexRequest() { @@ -388,18 +385,7 @@ public CreateIndexRequest source(Map source, DeprecationHandler depre } else if (ALIASES.match(name, deprecationHandler)) { aliases((Map) entry.getValue()); } else { - // maybe custom? 
- IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); - if (proto != null) { - try { - customs.put(name, proto.fromMap((Map) entry.getValue())); - } catch (IOException e) { - throw new ElasticsearchParseException("failed to parse custom metadata for [{}]", name); - } - } else { - // found a key which is neither custom defined nor one of the supported ones - throw new ElasticsearchParseException("unknown key [{}] for create index", name); - } + throw new ElasticsearchParseException("unknown key [{}] for create index", name); } } return this; @@ -413,18 +399,6 @@ public Set aliases() { return this.aliases; } - /** - * Adds custom metadata to the index to be created. - */ - public CreateIndexRequest custom(IndexMetaData.Custom custom) { - customs.put(custom.type(), custom); - return this; - } - - public Map customs() { - return this.customs; - } - public ActiveShardCount waitForActiveShards() { return waitForActiveShards; } @@ -474,11 +448,13 @@ public void readFrom(StreamInput in) throws IOException { } mappings.put(type, source); } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); - customs.put(type, customIndexMetaData); + if (in.getVersion().before(Version.V_6_5_0)) { + // This used to be the size of custom metadata classes + int customSize = in.readVInt(); + assert customSize == 0 : "unexpected custom metadata when none is supported"; + if (customSize > 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } int aliasesSize = in.readVInt(); for (int i = 0; i < aliasesSize; i++) { @@ -501,10 +477,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(entry.getKey()); out.writeString(entry.getValue()); } - out.writeVInt(customs.size()); - for (Map.Entry entry : customs.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); + if (out.getVersion().before(Version.V_6_5_0)) { + // Size of custom index metadata, which is removed + out.writeVInt(0); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { @@ -542,10 +517,6 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t alias.toXContent(builder, params); } builder.endObject(); - - for (Map.Entry entry : customs.entrySet()) { - builder.field(entry.getKey(), entry.getValue(), params); - } return builder; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index cc8fb2c32c375..d2593e7e94be3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -224,14 +223,6 @@ public CreateIndexRequestBuilder setSource(Map source) { return this; } - /** - * Adds custom metadata to the index to be created. 
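// Illustrative sketch, not part of this change: the backwards-compatibility shim used above once custom
// index metadata support is removed. Peers older than 6.5.0 still expect a count of custom entries on
// the wire, so a zero is written for them and a zero is required when reading from them. Plain
// DataInput/DataOutput and a fixed-width int stand in for StreamInput/StreamOutput and the vint used in
// the real code. Hypothetical RemovedFieldShimDemo class.
import java.io.*;

final class RemovedFieldShimDemo {
    static final int V_6_5_0 = 6050099;

    static void writeCustoms(DataOutput out, int peerWireVersion) throws IOException {
        if (peerWireVersion < V_6_5_0) {
            out.writeInt(0); // the feature is gone, but old peers still expect the (empty) slot
        }
    }

    static void readCustoms(DataInput in, int peerWireVersion) throws IOException {
        if (peerWireVersion < V_6_5_0) {
            int customSize = in.readInt();
            if (customSize > 0) {
                throw new IllegalStateException("unexpected custom metadata when none is supported");
            }
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeCustoms(new DataOutputStream(bytes), 6040199); // pretend the peer is 6.4.1
        readCustoms(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), 6040199);
        System.out.println("ok"); // no customs were ever written, so the read sees zero
    }
}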
- */ - public CreateIndexRequestBuilder addCustom(IndexMetaData.Custom custom) { - request.custom(custom); - return this; - } - /** * Sets the settings and mappings as a single source. */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java index c858d0bb10651..79192693620dd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.create; -import org.elasticsearch.Version; import org.elasticsearch.action.support.master.ShardsAcknowledgedResponse; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -66,18 +65,14 @@ protected CreateIndexResponse(boolean acknowledged, boolean shardsAcknowledged, public void readFrom(StreamInput in) throws IOException { super.readFrom(in); readShardsAcknowledged(in); - if (in.getVersion().onOrAfter(Version.V_5_6_0)) { - index = in.readString(); - } + index = in.readString(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeShardsAcknowledged(out); - if (out.getVersion().onOrAfter(Version.V_5_6_0)) { - out.writeString(index); - } + out.writeString(index); } public String index() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 4cf159c439cb5..e4384745d36e8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -75,7 +75,7 @@ protected void masterOperation(final CreateIndexRequest request, final ClusterSt final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index()) .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .settings(request.settings()).mappings(request.mappings()) - .aliases(request.aliases()).customs(request.customs()) + .aliases(request.aliases()) .waitForActiveShards(request.waitForActiveShards()); createIndexService.createIndex(updateRequest, ActionListener.wrap(response -> diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 1556ee2341d27..a827444acb8c5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -297,10 +297,6 @@ public void readFrom(StreamInput in) throws IOException { indicesOptions = IndicesOptions.readIndicesOptions(in); type = in.readOptionalString(); source = in.readString(); - if (in.getVersion().before(Version.V_5_3_0)) { - // we do not know the format from earlier versions so convert if necessary - source = XContentHelper.convertToJson(new BytesArray(source), false, false, XContentFactory.xContentType(source)); - } if (in.getVersion().before(Version.V_7_0_0_alpha1)) { in.readBoolean(); // updateAllTypes } diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index 7195fd78154c1..5459805416e91 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -185,7 +185,6 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Resi .masterNodeTimeout(targetIndex.masterNodeTimeout()) .settings(targetIndex.settings()) .aliases(targetIndex.aliases()) - .customs(targetIndex.customs()) .waitForActiveShards(targetIndex.waitForActiveShards()) .recoverFrom(metaData.getIndex()) .resizeType(resizeRequest.getResizeType()) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java deleted file mode 100644 index acc88251970f3..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.shrink; - -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -/** - * Main class to initiate shrinking an index into a new index - * This class is only here for backwards compatibility. 
It will be replaced by - * TransportResizeAction in 7.x once this is backported - */ -public class TransportShrinkAction extends TransportResizeAction { - - @Inject - public TransportShrinkAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, MetaDataCreateIndexService createIndexService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client) { - super(settings, ShrinkAction.NAME, transportService, clusterService, threadPool, createIndexService, actionFilters, - indexNameExpressionResolver, client); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index d194b9acd1b7f..d254f989d4a29 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -61,9 +60,9 @@ import java.util.stream.Collectors; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; /** * A request to create an index template. @@ -88,8 +87,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest aliases = new HashSet<>(); - private Map customs = new HashMap<>(); - private Integer version; public PutIndexTemplateRequest() { @@ -353,15 +350,7 @@ public PutIndexTemplateRequest source(Map templateSource) { } else if (name.equals("aliases")) { aliases((Map) entry.getValue()); } else { - // maybe custom? 
- IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); - if (proto != null) { - try { - customs.put(name, proto.fromMap((Map) entry.getValue())); - } catch (IOException e) { - throw new ElasticsearchParseException("failed to parse custom metadata for [{}]", name); - } - } + throw new ElasticsearchParseException("unknown key [{}] in the template ", name); } } return this; @@ -395,15 +384,6 @@ public PutIndexTemplateRequest source(BytesReference source, XContentType xConte return source(XContentHelper.convertToMap(source, true, xContentType).v2()); } - public PutIndexTemplateRequest custom(IndexMetaData.Custom custom) { - customs.put(custom.type(), custom); - return this; - } - - public Map customs() { - return this.customs; - } - public Set aliases() { return this.aliases; } @@ -492,18 +472,15 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < size; i++) { final String type = in.readString(); String mappingSource = in.readString(); - if (in.getVersion().before(Version.V_5_3_0)) { - // we do not know the incoming type so convert it if needed - mappingSource = - XContentHelper.convertToJson(new BytesArray(mappingSource), false, false, XContentFactory.xContentType(mappingSource)); - } mappings.put(type, mappingSource); } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); - customs.put(type, customIndexMetaData); + if (in.getVersion().before(Version.V_6_5_0)) { + // Used to be used for custom index metadata + int customSize = in.readVInt(); + assert customSize == 0 : "expected not to have any custom metadata"; + if (customSize > 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } int aliasesSize = in.readVInt(); for (int i = 0; i < aliasesSize; i++) { @@ -530,10 +507,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(entry.getKey()); out.writeString(entry.getValue()); } - out.writeVInt(customs.size()); - for (Map.Entry entry : customs.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); + if (out.getVersion().before(Version.V_6_5_0)) { + out.writeVInt(0); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { @@ -570,10 +545,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endObject(); - for (Map.Entry entry : customs.entrySet()) { - builder.field(entry.getKey(), entry.getValue(), params); - } - return builder; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index bd8621a1a7d6f..34eccbf9d8a40 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -84,7 +84,6 @@ protected void masterOperation(final PutIndexTemplateRequest request, final Clus .settings(templateSettingsBuilder.build()) .mappings(request.mappings()) .aliases(request.aliases()) - .customs(request.customs()) .create(request.create()) .masterTimeout(request.masterNodeTimeout()) .version(request.version()), diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java index d0a62fe771d1f..b60bc407ce70c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java @@ -120,11 +120,7 @@ public void readFrom(StreamInput in) throws IOException { } else { index = in.readString(); } - if (in.getVersion().onOrAfter(Version.V_5_4_0)) { - shard = in.readInt(); - } else { - shard = RANDOM_SHARD; - } + shard = in.readInt(); valid = in.readBoolean(); explanation = in.readOptionalString(); error = in.readOptionalString(); @@ -137,9 +133,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeString(index); } - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeInt(shard); - } + out.writeInt(shard); out.writeBoolean(valid); out.writeOptionalString(explanation); out.writeOptionalString(error); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java index 7694e7583c898..a30c9ba846107 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.validate.query; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.IndicesOptions; @@ -156,9 +155,7 @@ public void readFrom(StreamInput in) throws IOException { } explain = in.readBoolean(); rewrite = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_5_4_0)) { - allShards = in.readBoolean(); - } + allShards = in.readBoolean(); } @Override @@ -171,9 +168,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeBoolean(explain); out.writeBoolean(rewrite); - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeBoolean(allShards); - } + out.writeBoolean(allShards); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index fb535d312cf65..838293b8b1f65 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -28,11 +28,13 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -42,6 +44,8 @@ import java.io.IOException; +import 
static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; @@ -161,11 +165,11 @@ public static BulkItemResponse fromXContent(XContentParser parser, int id) throw * Represents a failure. */ public static class Failure implements Writeable, ToXContentFragment { - static final String INDEX_FIELD = "index"; - static final String TYPE_FIELD = "type"; - static final String ID_FIELD = "id"; - static final String CAUSE_FIELD = "cause"; - static final String STATUS_FIELD = "status"; + public static final String INDEX_FIELD = "index"; + public static final String TYPE_FIELD = "type"; + public static final String ID_FIELD = "id"; + public static final String CAUSE_FIELD = "cause"; + public static final String STATUS_FIELD = "status"; private final String index; private final String type; @@ -175,6 +179,23 @@ public static class Failure implements Writeable, ToXContentFragment { private final long seqNo; private final boolean aborted; + public static ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "bulk_failures", + true, + a -> + new Failure( + (String)a[0], (String)a[1], (String)a[2], (Exception)a[3], RestStatus.fromCode((int)a[4]) + ) + ); + static { + PARSER.declareString(constructorArg(), new ParseField(INDEX_FIELD)); + PARSER.declareString(constructorArg(), new ParseField(TYPE_FIELD)); + PARSER.declareString(optionalConstructorArg(), new ParseField(ID_FIELD)); + PARSER.declareObject(constructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), new ParseField(CAUSE_FIELD)); + PARSER.declareInt(constructorArg(), new ParseField(STATUS_FIELD)); + } + /** * For write failures before operation was assigned a sequence number. 
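// Illustrative usage sketch, not part of this change (a fragment; assumes it runs inside a test method
// that may throw IOException, with the usual org.elasticsearch and java.io/java.nio imports): feeding a
// REST-style bulk failure entry through the new declarative PARSER via the fromXContent method added
// later in this hunk. The createParser call mirrors the one used elsewhere in this diff; the JSON
// literal is only an example.
String json = "{\"index\":\"idx\",\"type\":\"_doc\",\"id\":\"1\","
        + "\"cause\":{\"type\":\"illegal_argument_exception\",\"reason\":\"boom\"},\"status\":400}";
try (XContentParser parser = XContentType.JSON.xContent().createParser(
        NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE,
        new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)))) {
    parser.nextToken(); // position on START_OBJECT before handing the parser over
    BulkItemResponse.Failure failure = BulkItemResponse.Failure.fromXContent(parser);
    assert "idx".equals(failure.getIndex());
    assert failure.getStatus() == RestStatus.BAD_REQUEST;
}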
* @@ -244,8 +265,8 @@ public void writeTo(StreamOutput out) throws IOException { } private static boolean supportsAbortedFlag(Version version) { - // The "aborted" flag was added for 5.5.3 and 5.6.0, but was not in 6.0.0-beta2 - return version.after(Version.V_6_0_0_beta2) || (version.major == 5 && version.onOrAfter(Version.V_5_5_3)); + // The "aborted" flag was not in 6.0.0-beta2 + return version.after(Version.V_6_0_0_beta2); } /** @@ -322,6 +343,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + public static Failure fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + @Override public String toString() { return Strings.toString(this); @@ -447,11 +472,7 @@ public static BulkItemResponse readBulkItem(StreamInput in) throws IOException { @Override public void readFrom(StreamInput in) throws IOException { id = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - opType = OpType.fromId(in.readByte()); - } else { - opType = OpType.fromString(in.readString()); - } + opType = OpType.fromId(in.readByte()); byte type = in.readByte(); if (type == 0) { @@ -474,11 +495,7 @@ public void readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeByte(opType.getId()); - } else { - out.writeString(opType.getLowercase()); - } + out.writeByte(opType.getId()); if (response == null) { out.writeByte((byte) 2); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index e3e94e8233944..ea4a5086d7b98 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -521,7 +521,7 @@ private long relativeTime() { void processBulkIndexIngestRequest(Task task, BulkRequest original, ActionListener listener) { long ingestStartTimeInNanos = System.nanoTime(); BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); - ingestService.getPipelineExecutionService().executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> { + ingestService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> { logger.debug(() -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception); bulkRequestModifier.markCurrentItemAsFailed(exception); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java index 21bb452430e7a..5cfdba9294634 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java @@ -166,14 +166,14 @@ public String getName() { } /** - * Whether this field is indexed for search on all indices. + * Whether this field can be aggregated on all indices. */ public boolean isAggregatable() { return isAggregatable; } /** - * Whether this field can be aggregated on all indices. + * Whether this field is indexed for search on all indices. 
*/ public boolean isSearchable() { return isSearchable; diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index 636af6101ae0e..e9e77df5f9030 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -81,24 +80,18 @@ void setMergeResults(boolean mergeResults) { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); fields = in.readStringArray(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - mergeResults = in.readBoolean(); - } else { - mergeResults = true; - } + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + mergeResults = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(fields); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeStringArray(indices); - indicesOptions.writeIndicesOptions(out); - out.writeBoolean(mergeResults); - } + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + out.writeBoolean(mergeResults); } /** @@ -118,7 +111,6 @@ public String[] fields() { } /** - * * The list of indices to lookup */ public FieldCapabilitiesRequest indices(String... indices) { diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java index 959b4e572b714..f908ec7b1b289 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.collect.Tuple; @@ -36,6 +35,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.stream.Collectors; /** @@ -57,15 +57,15 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont private FieldCapabilitiesResponse(Map> responseMap, List indexResponses) { - this.responseMap = responseMap; - this.indexResponses = indexResponses; + this.responseMap = Objects.requireNonNull(responseMap); + this.indexResponses = Objects.requireNonNull(indexResponses); } /** * Used for serialization */ FieldCapabilitiesResponse() { - this.responseMap = Collections.emptyMap(); + this(Collections.emptyMap(), Collections.emptyList()); } /** @@ -82,6 +82,7 @@ public Map> get() { List getIndexResponses() { return indexResponses; } + /** * * Get the field capabilities per type for the provided {@code field}. 
@@ -95,11 +96,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); this.responseMap = in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - indexResponses = in.readList(FieldCapabilitiesIndexResponse::new); - } else { - indexResponses = Collections.emptyList(); - } + indexResponses = in.readList(FieldCapabilitiesIndexResponse::new); } private static Map readField(StreamInput in) throws IOException { @@ -110,10 +107,7 @@ private static Map readField(StreamInput in) throws I public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeList(indexResponses); - } - + out.writeList(indexResponses); } private static void writeField(StreamOutput out, diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index ef0d19a265583..b8d1f477ac108 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -90,7 +90,7 @@ protected void doExecute(Task task, FieldCapabilitiesRequest request, final Acti } }; if (totalNumRequest == 0) { - listener.onResponse(new FieldCapabilitiesResponse()); + listener.onResponse(new FieldCapabilitiesResponse(Collections.emptyMap())); } else { ActionListener innerListener = new ActionListener() { @Override diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java index d3cd052ecad1e..6b4c74fe56cd4 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java @@ -27,26 +27,23 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.ingest.PipelineStore; -import org.elasticsearch.node.NodeService; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; public class DeletePipelineTransportAction extends TransportMasterNodeAction { - private final PipelineStore pipelineStore; - private final ClusterService clusterService; + private final IngestService ingestService; @Inject - public DeletePipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, + public DeletePipelineTransportAction(Settings settings, ThreadPool threadPool, IngestService ingestService, TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) { - super(settings, DeletePipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeletePipelineRequest::new); - this.clusterService = clusterService; - 
this.pipelineStore = nodeService.getIngestService().getPipelineStore(); + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, DeletePipelineAction.NAME, transportService, ingestService.getClusterService(), + threadPool, actionFilters, indexNameExpressionResolver, DeletePipelineRequest::new); + this.ingestService = ingestService; } @Override @@ -60,8 +57,9 @@ protected AcknowledgedResponse newResponse() { } @Override - protected void masterOperation(DeletePipelineRequest request, ClusterState state, ActionListener listener) throws Exception { - pipelineStore.delete(clusterService, request, listener); + protected void masterOperation(DeletePipelineRequest request, ClusterState state, + ActionListener listener) throws Exception { + ingestService.delete(request, listener); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java index 191ed87a42cde..540f46982a569 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java @@ -29,21 +29,17 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.ingest.PipelineStore; -import org.elasticsearch.node.NodeService; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; public class GetPipelineTransportAction extends TransportMasterNodeReadAction { - - private final PipelineStore pipelineStore; - + @Inject public GetPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) { + IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, GetPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, GetPipelineRequest::new, indexNameExpressionResolver); - this.pipelineStore = nodeService.getIngestService().getPipelineStore(); } @Override @@ -58,7 +54,7 @@ protected GetPipelineResponse newResponse() { @Override protected void masterOperation(GetPipelineRequest request, ClusterState state, ActionListener listener) throws Exception { - listener.onResponse(new GetPipelineResponse(pipelineStore.getPipelines(state, request.getIds()))); + listener.onResponse(new GetPipelineResponse(IngestService.getPipelines(state, request.getIds()))); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java index 6447b0557db0c..abff28bcf553c 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.bytes.BytesReference; @@ -82,11 +81,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); id = in.readString(); source = 
in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - xContentType = in.readEnum(XContentType.class); - } else { - xContentType = XContentHelper.xContentType(source); - } + xContentType = in.readEnum(XContentType.class); } @Override @@ -94,9 +89,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); out.writeBytesReference(source); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index abe8f49272c77..38e1f2fb54b5b 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -32,12 +32,10 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.ingest.PipelineStore; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.IngestInfo; -import org.elasticsearch.node.NodeService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -46,19 +44,19 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction { - private final PipelineStore pipelineStore; - private final ClusterService clusterService; + private final IngestService ingestService; private final NodeClient client; @Inject - public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService, - NodeClient client) { - super(settings, PutPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new); - this.clusterService = clusterService; + public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + IngestService ingestService, NodeClient client) { + super( + settings, PutPipelineAction.NAME, transportService, ingestService.getClusterService(), + threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new + ); this.client = client; - this.pipelineStore = nodeService.getIngestService().getPipelineStore(); + this.ingestService = ingestService; } @Override @@ -84,7 +82,7 @@ public void onResponse(NodesInfoResponse nodeInfos) { for (NodeInfo nodeInfo : nodeInfos.getNodes()) { ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest()); } - pipelineStore.put(clusterService, ingestInfos, request, listener); + ingestService.putPipeline(ingestInfos, request, listener); } catch (Exception e) { onFailure(e); } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java index 9a7d6bb7feea9..7514a41f5756b 100644 --- 
a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.bytes.BytesReference; @@ -32,8 +31,8 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.Pipeline; -import org.elasticsearch.ingest.PipelineStore; import java.io.IOException; import java.util.ArrayList; @@ -76,11 +75,7 @@ public SimulatePipelineRequest(BytesReference source, XContentType xContentType) id = in.readOptionalString(); verbose = in.readBoolean(); source = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - xContentType = in.readEnum(XContentType.class); - } else { - xContentType = XContentHelper.xContentType(source); - } + xContentType = in.readEnum(XContentType.class); } @Override @@ -123,9 +118,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(id); out.writeBoolean(verbose); out.writeBytesReference(source); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); } @Override @@ -164,14 +157,13 @@ public boolean isVerbose() { } } - private static final Pipeline.Factory PIPELINE_FACTORY = new Pipeline.Factory(); static final String SIMULATED_PIPELINE_ID = "_simulate_pipeline"; - static Parsed parseWithPipelineId(String pipelineId, Map config, boolean verbose, PipelineStore pipelineStore) { + static Parsed parseWithPipelineId(String pipelineId, Map config, boolean verbose, IngestService ingestService) { if (pipelineId == null) { throw new IllegalArgumentException("param [pipeline] is null"); } - Pipeline pipeline = pipelineStore.get(pipelineId); + Pipeline pipeline = ingestService.getPipeline(pipelineId); if (pipeline == null) { throw new IllegalArgumentException("pipeline [" + pipelineId + "] does not exist"); } @@ -179,9 +171,11 @@ static Parsed parseWithPipelineId(String pipelineId, Map config, return new Parsed(pipeline, ingestDocumentList, verbose); } - static Parsed parse(Map config, boolean verbose, PipelineStore pipelineStore) throws Exception { + static Parsed parse(Map config, boolean verbose, IngestService ingestService) throws Exception { Map pipelineConfig = ConfigurationUtils.readMap(null, null, config, Fields.PIPELINE); - Pipeline pipeline = PIPELINE_FACTORY.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactories()); + Pipeline pipeline = Pipeline.create( + SIMULATED_PIPELINE_ID, pipelineConfig, ingestService.getProcessorFactories(), ingestService.getScriptService() + ); List ingestDocumentList = parseDocs(config); return new Parsed(pipeline, ingestDocumentList, verbose); } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java index 2e898c1895f9a..ad8577d5244de 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java @@ -26,8 +26,7 @@ import 
org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.ingest.PipelineStore; -import org.elasticsearch.node.NodeService; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -36,15 +35,15 @@ public class SimulatePipelineTransportAction extends HandledTransportAction { - private final PipelineStore pipelineStore; + private final IngestService ingestService; private final SimulateExecutionService executionService; @Inject public SimulatePipelineTransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, - ActionFilters actionFilters, NodeService nodeService) { + ActionFilters actionFilters, IngestService ingestService) { super(settings, SimulatePipelineAction.NAME, transportService, actionFilters, (Writeable.Reader) SimulatePipelineRequest::new); - this.pipelineStore = nodeService.getIngestService().getPipelineStore(); + this.ingestService = ingestService; this.executionService = new SimulateExecutionService(threadPool); } @@ -55,9 +54,9 @@ protected void doExecute(Task task, SimulatePipelineRequest request, ActionListe final SimulatePipelineRequest.Parsed simulateRequest; try { if (request.getId() != null) { - simulateRequest = SimulatePipelineRequest.parseWithPipelineId(request.getId(), source, request.isVerbose(), pipelineStore); + simulateRequest = SimulatePipelineRequest.parseWithPipelineId(request.getId(), source, request.isVerbose(), ingestService); } else { - simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), pipelineStore); + simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), ingestService); } } catch (Exception e) { listener.onFailure(e); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java index 3d3737b0638cd..fa532777e9dce 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java @@ -134,11 +134,10 @@ private static String buildMessage(String phaseName, String msg, ShardSearchFail @Override protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException { builder.field("phase", phaseName); - final boolean group = params.paramAsBoolean("group_shard_failures", true); // we group by default - builder.field("grouped", group); // notify that it's grouped + builder.field("grouped", true); // notify that it's grouped builder.field("failed_shards"); builder.startArray(); - ShardOperationFailedException[] failures = group ? 
ExceptionsHelper.groupBy(shardFailures) : shardFailures; + ShardOperationFailedException[] failures = ExceptionsHelper.groupBy(shardFailures); for (ShardOperationFailedException failure : failures) { builder.startObject(); failure.toXContent(builder, params); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index e67517c4852b8..dd7f687294383 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -135,10 +135,8 @@ public SearchRequest(StreamInput in) throws IOException { indicesOptions = IndicesOptions.readIndicesOptions(in); requestCache = in.readOptionalBoolean(); batchedReduceSize = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_5_6_0)) { - maxConcurrentShardRequests = in.readVInt(); - preFilterShardSize = in.readVInt(); - } + maxConcurrentShardRequests = in.readVInt(); + preFilterShardSize = in.readVInt(); if (in.getVersion().onOrAfter(Version.V_6_3_0)) { allowPartialSearchResults = in.readOptionalBoolean(); } @@ -160,10 +158,8 @@ public void writeTo(StreamOutput out) throws IOException { indicesOptions.writeIndicesOptions(out); out.writeOptionalBoolean(requestCache); out.writeVInt(batchedReduceSize); - if (out.getVersion().onOrAfter(Version.V_5_6_0)) { - out.writeVInt(maxConcurrentShardRequests); - out.writeVInt(preFilterShardSize); - } + out.writeVInt(maxConcurrentShardRequests); + out.writeVInt(preFilterShardSize); if (out.getVersion().onOrAfter(Version.V_6_3_0)) { out.writeOptionalBoolean(allowPartialSearchResults); } @@ -188,6 +184,10 @@ public ActionRequestValidationException validate() { if (source != null && source.size() == 0 && scroll != null) { validationException = addValidationError("[size] cannot be [0] in a scroll context", validationException); } + if (source != null && source.rescores() != null && source.rescores().isEmpty() == false && scroll != null) { + validationException = + addValidationError("using [rescore] is not allowed in a scroll context", validationException); + } return validationException; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 2a97798764e59..0273d5e58219a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -374,9 +374,7 @@ public void readFrom(StreamInput in) throws IOException { } scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_5_6_0)) { - skippedShards = in.readVInt(); - } + skippedShards = in.readVInt(); } @Override @@ -395,9 +393,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); - if(out.getVersion().onOrAfter(Version.V_5_6_0)) { - out.writeVInt(skippedShards); - } + out.writeVInt(skippedShards); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 133d0291df597..a4ea2616e0a21 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -19,7 +19,6 @@ package 
org.elasticsearch.action.search; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; @@ -113,17 +112,8 @@ public void sendFreeContext(Transport.Connection connection, long contextId, fin public void sendCanMatch(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, final ActionListener listener) { - if (connection.getNode().getVersion().onOrAfter(Version.V_5_6_0)) { - transportService.sendChildRequest(connection, QUERY_CAN_MATCH_NAME, request, task, - TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, CanMatchResponse::new)); - } else { - // this might look weird but if we are in a CrossClusterSearch environment we can get a connection - // to a pre 5.latest node which is proxied by a 5.latest node under the hood since we are only compatible with 5.latest - // instead of sending the request we shortcut it here and let the caller deal with this -- see #25704 - // also failing the request instead of returning a fake answer might trigger a retry on a replica which might be on a - // compatible node - throw new IllegalArgumentException("can_match is not supported on pre 5.6 nodes"); - } + transportService.sendChildRequest(connection, QUERY_CAN_MATCH_NAME, request, task, + TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, CanMatchResponse::new)); } public void sendClearAllScrollContexts(Transport.Connection connection, final ActionListener listener) { diff --git a/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java index 98418153d501a..ddfadfa57e31e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java +++ b/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java @@ -54,8 +54,7 @@ public class ShardSearchFailure extends ShardOperationFailedException { private SearchShardTarget shardTarget; - private ShardSearchFailure() { - + ShardSearchFailure() { } public ShardSearchFailure(Exception e) { @@ -101,6 +100,8 @@ public static ShardSearchFailure readShardSearchFailure(StreamInput in) throws I public void readFrom(StreamInput in) throws IOException { if (in.readBoolean()) { shardTarget = new SearchShardTarget(in); + index = shardTarget.getFullyQualifiedIndexName(); + shardId = shardTarget.getShardId().getId(); } reason = in.readString(); status = RestStatus.readFrom(in); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java index bc5c696894a6a..3e0c1a6d1e413 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java @@ -271,7 +271,7 @@ public boolean primary() { public void readFrom(StreamInput in) throws IOException { shardId = ShardId.readShardId(in); super.shardId = shardId.getId(); - super.index = shardId.getIndexName(); + index = shardId.getIndexName(); nodeId = in.readOptionalString(); cause = in.readException(); status = RestStatus.readFrom(in); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java 
index f416627c1e088..d6bf911e572c3 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -498,14 +498,10 @@ public void readFrom(StreamInput in) throws IOException { if (in.readBoolean()) { doc = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - xContentType = in.readEnum(XContentType.class); - } else { - xContentType = XContentHelper.xContentType(doc); - } + xContentType = in.readEnum(XContentType.class); } routing = in.readOptionalString(); - + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { in.readOptionalString(); // _parent } @@ -546,9 +542,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(doc != null); if (doc != null) { out.writeBytesReference(doc); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); } out.writeOptionalString(routing); if (out.getVersion().before(Version.V_7_0_0_alpha1)) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index 1a028042db29b..c5a8e806f41a4 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.index.IndexModule; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.process.ProcessProbe; import org.elasticsearch.node.NodeValidationException; @@ -393,17 +394,22 @@ long getMaxFileSize() { static class MaxMapCountCheck implements BootstrapCheck { - private static final long LIMIT = 1 << 18; + static final long LIMIT = 1 << 18; @Override - public BootstrapCheckResult check(BootstrapContext context) { - if (getMaxMapCount() != -1 && getMaxMapCount() < LIMIT) { - final String message = String.format( - Locale.ROOT, - "max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]", - getMaxMapCount(), - LIMIT); - return BootstrapCheckResult.failure(message); + public BootstrapCheckResult check(final BootstrapContext context) { + // we only enforce the check if mmapfs is an allowed store type + if (IndexModule.NODE_STORE_ALLOW_MMAPFS.get(context.settings)) { + if (getMaxMapCount() != -1 && getMaxMapCount() < LIMIT) { + final String message = String.format( + Locale.ROOT, + "max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]", + getMaxMapCount(), + LIMIT); + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } } else { return BootstrapCheckResult.success(); } diff --git a/server/src/main/java/org/elasticsearch/client/Client.java b/server/src/main/java/org/elasticsearch/client/Client.java index adb2f509b999e..f97f618347af5 100644 --- a/server/src/main/java/org/elasticsearch/client/Client.java +++ b/server/src/main/java/org/elasticsearch/client/Client.java @@ -455,7 +455,7 @@ public interface Client extends ElasticsearchClient, Releasable { /** * Builder for the field capabilities request. */ - FieldCapabilitiesRequestBuilder prepareFieldCaps(); + FieldCapabilitiesRequestBuilder prepareFieldCaps(String... 
indices); /** * An action that returns the field capabilities from the provided request diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 86d9d2c445f3f..553c92e6de86e 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -651,8 +651,8 @@ public ActionFuture fieldCaps(FieldCapabilitiesReques } @Override - public FieldCapabilitiesRequestBuilder prepareFieldCaps() { - return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE); + public FieldCapabilitiesRequestBuilder prepareFieldCaps(String... indices) { + return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE, indices); } static class Admin implements AdminClient { diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 276e00a2ba3db..f7606d4bb061f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -284,7 +284,7 @@ public String toString() { final String TAB = " "; for (IndexMetaData indexMetaData : metaData) { sb.append(TAB).append(indexMetaData.getIndex()); - sb.append(": v[").append(indexMetaData.getVersion()).append("]\n"); + sb.append(": v[").append(indexMetaData.getVersion()).append("], mv[").append(indexMetaData.getMappingVersion()).append("]\n"); for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) { sb.append(TAB).append(TAB).append(shard).append(": "); sb.append("p_term [").append(indexMetaData.primaryTerm(shard)).append("], "); diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java index 234d1ef9f17fd..0134b798c72fd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java @@ -40,8 +40,6 @@ public class SnapshotDeletionsInProgress extends AbstractNamedDiffable implements Custom { public static final String TYPE = "snapshot_deletions"; - // the version where SnapshotDeletionsInProgress was introduced - public static final Version VERSION_INTRODUCED = Version.V_5_2_0; // the list of snapshot deletion request entries private final List entries; @@ -135,7 +133,7 @@ public static NamedDiff readDiffFrom(StreamInput in) throws IOException @Override public Version getMinimalSupportedVersion() { - return VERSION_INTRODUCED; + return Version.CURRENT.minimumCompatibilityVersion(); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 87563c968af17..565c5134d1b38 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -48,12 +48,6 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implements Custom { public static final String TYPE = "snapshots"; - // denotes an undefined repository state id, which will happen when receiving a cluster state with - // a snapshot in progress from a pre 5.2.x node - public static final long UNDEFINED_REPOSITORY_STATE_ID = -2L; - // the 
version where repository state ids were introduced - private static final Version REPOSITORY_ID_INTRODUCED_VERSION = Version.V_5_2_0; - @Override public boolean equals(Object o) { if (this == o) return true; @@ -432,10 +426,7 @@ public SnapshotsInProgress(StreamInput in) throws IOException { builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState, reason)); } } - long repositoryStateId = UNDEFINED_REPOSITORY_STATE_ID; - if (in.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { - repositoryStateId = in.readLong(); - } + long repositoryStateId = in.readLong(); entries[i] = new Entry(snapshot, includeGlobalState, partial, @@ -471,9 +462,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(shardEntry.value.state().value()); } } - if (out.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { - out.writeLong(entry.repositoryStateId); - } + out.writeLong(entry.repositoryStateId); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java index efbd262b16dda..fc09741f4d9c2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.block; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -138,11 +137,7 @@ public void readFrom(StreamInput in) throws IOException { retryable = in.readBoolean(); disableStatePersistence = in.readBoolean(); status = RestStatus.readFrom(in); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - allowReleaseResources = in.readBoolean(); - } else { - allowReleaseResources = false; - } + allowReleaseResources = in.readBoolean(); } @Override @@ -156,9 +151,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(retryable); out.writeBoolean(disableStatePersistence); RestStatus.writeTo(out, status); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeBoolean(allowReleaseResources); - } + out.writeBoolean(allowReleaseResources); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java new file mode 100644 index 0000000000000..4aa429f570499 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java @@ -0,0 +1,188 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * This is a {@code Map} that implements AbstractDiffable so it + * can be used for cluster state purposes + */ +public class DiffableStringMap extends AbstractMap implements Diffable { + + private final Map innerMap; + + DiffableStringMap(final Map map) { + this.innerMap = map; + } + + @SuppressWarnings("unchecked") + DiffableStringMap(final StreamInput in) throws IOException { + this.innerMap = (Map) (Map) in.readMap(); + } + + @Override + public String put(String key, String value) { + return innerMap.put(key, value); + } + + @Override + public Set> entrySet() { + return innerMap.entrySet(); + } + + @Override + @SuppressWarnings("unchecked") + public void writeTo(StreamOutput out) throws IOException { + out.writeMap((Map) (Map) innerMap); + } + + @Override + public Diff diff(DiffableStringMap previousState) { + return new DiffableStringMapDiff(previousState, this); + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return new DiffableStringMapDiff(in); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj instanceof DiffableStringMap) { + DiffableStringMap other = (DiffableStringMap) obj; + return innerMap.equals(other.innerMap); + } else if (obj instanceof Map) { + Map other = (Map) obj; + return innerMap.equals(other); + } else { + return false; + } + } + + @Override + public int hashCode() { + return innerMap.hashCode(); + } + + @Override + public String toString() { + return "DiffableStringMap[" + innerMap.toString() + "]"; + } + + /** + * Represents differences between two DiffableStringMaps. 
+ */ + public static class DiffableStringMapDiff implements Diff { + + private final List deletes; + private final Map upserts; // diffs also become upserts + + private DiffableStringMapDiff(DiffableStringMap before, DiffableStringMap after) { + final List tempDeletes = new ArrayList<>(); + final Map tempUpserts = new HashMap<>(); + for (String key : before.keySet()) { + if (after.containsKey(key) == false) { + tempDeletes.add(key); + } + } + + for (Map.Entry partIter : after.entrySet()) { + String beforePart = before.get(partIter.getKey()); + if (beforePart == null) { + tempUpserts.put(partIter.getKey(), partIter.getValue()); + } else if (partIter.getValue().equals(beforePart) == false) { + tempUpserts.put(partIter.getKey(), partIter.getValue()); + } + } + deletes = tempDeletes; + upserts = tempUpserts; + } + + private DiffableStringMapDiff(StreamInput in) throws IOException { + deletes = new ArrayList<>(); + upserts = new HashMap<>(); + int deletesCount = in.readVInt(); + for (int i = 0; i < deletesCount; i++) { + deletes.add(in.readString()); + } + int upsertsCount = in.readVInt(); + for (int i = 0; i < upsertsCount; i++) { + String key = in.readString(); + String newValue = in.readString(); + upserts.put(key, newValue); + } + } + + public List getDeletes() { + return deletes; + } + + public Map> getDiffs() { + return Collections.emptyMap(); + } + + public Map getUpserts() { + return upserts; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(deletes.size()); + for (String delete : deletes) { + out.writeString(delete); + } + out.writeVInt(upserts.size()); + for (Map.Entry entry : upserts.entrySet()) { + out.writeString(entry.getKey()); + out.writeString(entry.getValue()); + } + } + + @Override + public DiffableStringMap apply(DiffableStringMap part) { + Map builder = new HashMap<>(part.innerMap); + List deletes = getDeletes(); + for (String delete : deletes) { + builder.remove(delete); + } + assert getDiffs().size() == 0 : "there should never be diffs for DiffableStringMap"; + + for (Map.Entry upsert : upserts.entrySet()) { + builder.put(upsert.getKey(), upsert.getValue()); + } + return new DiffableStringMap(builder); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 18b89db72a391..a88ad4418e0c8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -23,7 +23,7 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - +import org.elasticsearch.Assertions; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; import org.elasticsearch.action.support.ActiveShardCount; @@ -64,7 +64,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.Locale; @@ -80,59 +79,6 @@ public class IndexMetaData implements Diffable, ToXContentFragment { - /** - * This class will be removed in v7.0 - */ - @Deprecated - public interface Custom extends Diffable, ToXContent { - - String type(); - - Custom fromMap(Map map) throws IOException; - - Custom fromXContent(XContentParser parser) throws IOException; - - /** - * Reads the {@link 
org.elasticsearch.cluster.Diff} from StreamInput - */ - Diff readDiffFrom(StreamInput in) throws IOException; - - /** - * Reads an object of this type from the provided {@linkplain StreamInput}. The receiving instance remains unchanged. - */ - Custom readFrom(StreamInput in) throws IOException; - - /** - * Merges from this to another, with this being more important, i.e., if something exists in this and another, - * this will prevail. - */ - Custom mergeWith(Custom another); - } - - public static Map customPrototypes = new HashMap<>(); - - /** - * Register a custom index meta data factory. Make sure to call it from a static block. - */ - public static void registerPrototype(String type, Custom proto) { - customPrototypes.put(type, proto); - } - - @Nullable - public static T lookupPrototype(String type) { - //noinspection unchecked - return (T) customPrototypes.get(type); - } - - public static T lookupPrototypeSafe(String type) { - //noinspection unchecked - T proto = (T) customPrototypes.get(type); - if (proto == null) { - throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); - } - return proto; - } - public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ)); public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE)); @@ -291,6 +237,7 @@ public Iterator> settings() { public static final String KEY_IN_SYNC_ALLOCATIONS = "in_sync_allocations"; static final String KEY_VERSION = "version"; + static final String KEY_MAPPING_VERSION = "mapping_version"; static final String KEY_ROUTING_NUM_SHARDS = "routing_num_shards"; static final String KEY_SETTINGS = "settings"; static final String KEY_STATE = "state"; @@ -309,6 +256,9 @@ public Iterator> settings() { private final Index index; private final long version; + + private final long mappingVersion; + private final long[] primaryTerms; private final State state; @@ -319,7 +269,7 @@ public Iterator> settings() { private final ImmutableOpenMap mappings; - private final ImmutableOpenMap customs; + private final ImmutableOpenMap customData; private final ImmutableOpenIntMap> inSyncAllocationIds; @@ -336,15 +286,17 @@ public Iterator> settings() { private final ActiveShardCount waitForActiveShards; private final ImmutableOpenMap rolloverInfos; - private IndexMetaData(Index index, long version, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings, + private IndexMetaData(Index index, long version, long mappingVersion, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings, ImmutableOpenMap mappings, ImmutableOpenMap aliases, - ImmutableOpenMap customs, ImmutableOpenIntMap> inSyncAllocationIds, + ImmutableOpenMap customData, ImmutableOpenIntMap> inSyncAllocationIds, DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters, Version indexCreatedVersion, Version indexUpgradedVersion, int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards, ImmutableOpenMap rolloverInfos) 
{ this.index = index; this.version = version; + assert mappingVersion >= 0 : mappingVersion; + this.mappingVersion = mappingVersion; this.primaryTerms = primaryTerms; assert primaryTerms.length == numberOfShards; this.state = state; @@ -353,7 +305,7 @@ private IndexMetaData(Index index, long version, long[] primaryTerms, State stat this.totalNumberOfShards = numberOfShards * (numberOfReplicas + 1); this.settings = settings; this.mappings = mappings; - this.customs = customs; + this.customData = customData; this.aliases = aliases; this.inSyncAllocationIds = inSyncAllocationIds; this.requireFilters = requireFilters; @@ -394,6 +346,9 @@ public long getVersion() { return this.version; } + public long getMappingVersion() { + return mappingVersion; + } /** * The term of the current selected primary. This is a non-negative number incremented when @@ -475,22 +430,14 @@ public MappingMetaData mapping(String mappingType) { return mappings.get(mappingType); } - // we keep the shrink settings for BWC - this can be removed in 8.0 - // we can't remove in 7 since this setting might be baked into an index coming in via a full cluster restart from 6.0 - public static final String INDEX_SHRINK_SOURCE_UUID_KEY = "index.shrink.source.uuid"; - public static final String INDEX_SHRINK_SOURCE_NAME_KEY = "index.shrink.source.name"; public static final String INDEX_RESIZE_SOURCE_UUID_KEY = "index.resize.source.uuid"; public static final String INDEX_RESIZE_SOURCE_NAME_KEY = "index.resize.source.name"; - public static final Setting INDEX_SHRINK_SOURCE_UUID = Setting.simpleString(INDEX_SHRINK_SOURCE_UUID_KEY); - public static final Setting INDEX_SHRINK_SOURCE_NAME = Setting.simpleString(INDEX_SHRINK_SOURCE_NAME_KEY); - public static final Setting INDEX_RESIZE_SOURCE_UUID = Setting.simpleString(INDEX_RESIZE_SOURCE_UUID_KEY, - INDEX_SHRINK_SOURCE_UUID); - public static final Setting INDEX_RESIZE_SOURCE_NAME = Setting.simpleString(INDEX_RESIZE_SOURCE_NAME_KEY, - INDEX_SHRINK_SOURCE_NAME); + public static final Setting INDEX_RESIZE_SOURCE_UUID = Setting.simpleString(INDEX_RESIZE_SOURCE_UUID_KEY); + public static final Setting INDEX_RESIZE_SOURCE_NAME = Setting.simpleString(INDEX_RESIZE_SOURCE_NAME_KEY); public Index getResizeSourceIndex() { - return INDEX_RESIZE_SOURCE_UUID.exists(settings) || INDEX_SHRINK_SOURCE_UUID.exists(settings) - ? new Index(INDEX_RESIZE_SOURCE_NAME.get(settings), INDEX_RESIZE_SOURCE_UUID.get(settings)) : null; + return INDEX_RESIZE_SOURCE_UUID.exists(settings) ? 
new Index(INDEX_RESIZE_SOURCE_NAME.get(settings), + INDEX_RESIZE_SOURCE_UUID.get(settings)) : null; } /** @@ -509,13 +456,12 @@ public MappingMetaData mappingOrDefault(String mappingType) { return mappings.get(MapperService.DEFAULT_MAPPING); } - public ImmutableOpenMap getCustoms() { - return this.customs; + ImmutableOpenMap getCustomData() { + return this.customData; } - @SuppressWarnings("unchecked") - public T custom(String type) { - return (T) customs.get(type); + public Map getCustomData(final String key) { + return Collections.unmodifiableMap(this.customData.get(key)); } public ImmutableOpenIntMap> getInSyncAllocationIds() { @@ -581,7 +527,7 @@ public boolean equals(Object o) { if (state != that.state) { return false; } - if (!customs.equals(that.customs)) { + if (!customData.equals(that.customData)) { return false; } if (routingNumShards != that.routingNumShards) { @@ -610,7 +556,7 @@ public int hashCode() { result = 31 * result + aliases.hashCode(); result = 31 * result + settings.hashCode(); result = 31 * result + mappings.hashCode(); - result = 31 * result + customs.hashCode(); + result = 31 * result + customData.hashCode(); result = 31 * result + Long.hashCode(routingFactor); result = 31 * result + Long.hashCode(routingNumShards); result = 31 * result + Arrays.hashCode(primaryTerms); @@ -644,25 +590,27 @@ private static class IndexMetaDataDiff implements Diff { private final String index; private final int routingNumShards; private final long version; + private final long mappingVersion; private final long[] primaryTerms; private final State state; private final Settings settings; private final Diff> mappings; private final Diff> aliases; - private final Diff> customs; + private final Diff> customData; private final Diff>> inSyncAllocationIds; private final Diff> rolloverInfos; IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) { index = after.index.getName(); version = after.version; + mappingVersion = after.mappingVersion; routingNumShards = after.routingNumShards; state = after.state; settings = after.settings; primaryTerms = after.primaryTerms; mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer()); aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer()); - customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer()); + customData = DiffableUtils.diff(before.customData, after.customData, DiffableUtils.getStringKeySerializer()); inSyncAllocationIds = DiffableUtils.diff(before.inSyncAllocationIds, after.inSyncAllocationIds, DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance()); rolloverInfos = DiffableUtils.diff(before.rolloverInfos, after.rolloverInfos, DiffableUtils.getStringKeySerializer()); @@ -672,6 +620,11 @@ private static class IndexMetaDataDiff implements Diff { index = in.readString(); routingNumShards = in.readInt(); version = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_6_5_0)) { + mappingVersion = in.readVLong(); + } else { + mappingVersion = 1; + } state = State.fromId(in.readByte()); settings = Settings.readSettingsFromStream(in); primaryTerms = in.readVLongArray(); @@ -679,18 +632,8 @@ private static class IndexMetaDataDiff implements Diff { MappingMetaData::readDiffFrom); aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData::new, AliasMetaData::readDiffFrom); - customs = DiffableUtils.readImmutableOpenMapDiff(in, 
DiffableUtils.getStringKeySerializer(), - new DiffableUtils.DiffableValueSerializer() { - @Override - public Custom read(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readFrom(in); - } - - @Override - public Diff readDiff(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readDiffFrom(in); - } - }); + customData = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), DiffableStringMap::new, + DiffableStringMap::readDiffFrom); inSyncAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance()); if (in.getVersion().onOrAfter(Version.V_6_4_0)) { @@ -707,12 +650,15 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(index); out.writeInt(routingNumShards); out.writeLong(version); + if (out.getVersion().onOrAfter(Version.V_6_5_0)) { + out.writeVLong(mappingVersion); + } out.writeByte(state.id); Settings.writeSettingsToStream(settings, out); out.writeVLongArray(primaryTerms); mappings.writeTo(out); aliases.writeTo(out); - customs.writeTo(out); + customData.writeTo(out); inSyncAllocationIds.writeTo(out); if (out.getVersion().onOrAfter(Version.V_6_4_0)) { rolloverInfos.writeTo(out); @@ -723,13 +669,14 @@ public void writeTo(StreamOutput out) throws IOException { public IndexMetaData apply(IndexMetaData part) { Builder builder = builder(index); builder.version(version); + builder.mappingVersion(mappingVersion); builder.setRoutingNumShards(routingNumShards); builder.state(state); builder.settings(settings); builder.primaryTerms(primaryTerms); builder.mappings.putAll(mappings.apply(part.mappings)); builder.aliases.putAll(aliases.apply(part.aliases)); - builder.customs.putAll(customs.apply(part.customs)); + builder.customMetaData.putAll(customData.apply(part.customData)); builder.inSyncAllocationIds.putAll(inSyncAllocationIds.apply(part.inSyncAllocationIds)); builder.rolloverInfos.putAll(rolloverInfos.apply(part.rolloverInfos)); return builder.build(); @@ -739,6 +686,11 @@ public IndexMetaData apply(IndexMetaData part) { public static IndexMetaData readFrom(StreamInput in) throws IOException { Builder builder = new Builder(in.readString()); builder.version(in.readLong()); + if (in.getVersion().onOrAfter(Version.V_6_5_0)) { + builder.mappingVersion(in.readVLong()); + } else { + builder.mappingVersion(1); + } builder.setRoutingNumShards(in.readInt()); builder.state(State.fromId(in.readByte())); builder.settings(readSettingsFromStream(in)); @@ -754,10 +706,17 @@ public static IndexMetaData readFrom(StreamInput in) throws IOException { builder.putAlias(aliasMd); } int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); + if (in.getVersion().onOrAfter(Version.V_6_5_0)) { + for (int i = 0; i < customSize; i++) { + String key = in.readString(); + DiffableStringMap custom = new DiffableStringMap(in); + builder.putCustom(key, custom); + } + } else { + assert customSize == 0 : "expected no custom index metadata"; + if (customSize > 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } int inSyncAllocationIdsSize = in.readVInt(); for (int i = 0; i < inSyncAllocationIdsSize; i++) { @@ -778,6 +737,9 @@ public static IndexMetaData readFrom(StreamInput in) throws IOException { public void 
writeTo(StreamOutput out) throws IOException { out.writeString(index.getName()); // uuid will come as part of settings out.writeLong(version); + if (out.getVersion().onOrAfter(Version.V_6_5_0)) { + out.writeVLong(mappingVersion); + } out.writeInt(routingNumShards); out.writeByte(state.id()); writeSettingsToStream(settings, out); @@ -790,10 +752,14 @@ public void writeTo(StreamOutput out) throws IOException { for (ObjectCursor cursor : aliases.values()) { cursor.value.writeTo(out); } - out.writeVInt(customs.size()); - for (ObjectObjectCursor cursor : customs) { - out.writeString(cursor.key); - cursor.value.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_6_5_0)) { + out.writeVInt(customData.size()); + for (final ObjectObjectCursor cursor : customData) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } else { + out.writeVInt(0); } out.writeVInt(inSyncAllocationIds.size()); for (IntObjectCursor> cursor : inSyncAllocationIds) { @@ -821,11 +787,12 @@ public static class Builder { private String index; private State state = State.OPEN; private long version = 1; + private long mappingVersion = 1; private long[] primaryTerms = null; private Settings settings = Settings.Builder.EMPTY_SETTINGS; private final ImmutableOpenMap.Builder mappings; private final ImmutableOpenMap.Builder aliases; - private final ImmutableOpenMap.Builder customs; + private final ImmutableOpenMap.Builder customMetaData; private final ImmutableOpenIntMap.Builder> inSyncAllocationIds; private final ImmutableOpenMap.Builder rolloverInfos; private Integer routingNumShards; @@ -834,7 +801,7 @@ public Builder(String index) { this.index = index; this.mappings = ImmutableOpenMap.builder(); this.aliases = ImmutableOpenMap.builder(); - this.customs = ImmutableOpenMap.builder(); + this.customMetaData = ImmutableOpenMap.builder(); this.inSyncAllocationIds = ImmutableOpenIntMap.builder(); this.rolloverInfos = ImmutableOpenMap.builder(); } @@ -843,11 +810,12 @@ public Builder(IndexMetaData indexMetaData) { this.index = indexMetaData.getIndex().getName(); this.state = indexMetaData.state; this.version = indexMetaData.version; + this.mappingVersion = indexMetaData.mappingVersion; this.settings = indexMetaData.getSettings(); this.primaryTerms = indexMetaData.primaryTerms.clone(); this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings); this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases); - this.customs = ImmutableOpenMap.builder(indexMetaData.customs); + this.customMetaData = ImmutableOpenMap.builder(indexMetaData.customData); this.routingNumShards = indexMetaData.routingNumShards; this.inSyncAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.inSyncAllocationIds); this.rolloverInfos = ImmutableOpenMap.builder(indexMetaData.rolloverInfos); @@ -977,8 +945,8 @@ public Builder removeAllAliases() { return this; } - public Builder putCustom(String type, Custom customIndexMetaData) { - this.customs.put(type, customIndexMetaData); + public Builder putCustom(String type, Map customIndexMetaData) { + this.customMetaData.put(type, new DiffableStringMap(customIndexMetaData)); return this; } @@ -1009,6 +977,15 @@ public Builder version(long version) { return this; } + public long mappingVersion() { + return mappingVersion; + } + + public Builder mappingVersion(final long mappingVersion) { + this.mappingVersion = mappingVersion; + return this; + } + /** * returns the primary term for the given shard. * See {@link IndexMetaData#primaryTerm(int)} for more information. 
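The hunks above thread a new mappingVersion field through IndexMetaData and its diff, gating every read and write of that field on Version.V_6_5_0 so that nodes which predate the field keep speaking the same wire format and simply fall back to a default of 1. Below is a minimal sketch of that version-gating pattern; it deliberately uses plain java.io streams and a made-up wire-version constant rather than Elasticsearch's StreamInput/StreamOutput and Version classes, so names here are illustrative only.

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

/**
 * Sketch (not the Elasticsearch classes) of the wire-compatibility pattern used above:
 * a newly added field is only written to, and read from, the stream when the remote
 * node is on a version that knows about it; otherwise a default value is used.
 */
class VersionGatedFieldSketch {
    // Hypothetical stand-in for Version.V_6_5_0
    static final int FIELD_INTRODUCED_IN = 6_05_00;

    long version;             // pre-existing field, always on the wire
    long mappingVersion = 1;  // newly introduced field, defaulted when the peer is older

    void writeTo(DataOutputStream out, int remoteWireVersion) throws IOException {
        out.writeLong(version);
        if (remoteWireVersion >= FIELD_INTRODUCED_IN) {
            out.writeLong(mappingVersion); // only newer peers expect this value
        }
    }

    void readFrom(DataInputStream in, int remoteWireVersion) throws IOException {
        version = in.readLong();
        if (remoteWireVersion >= FIELD_INTRODUCED_IN) {
            mappingVersion = in.readLong();
        } else {
            mappingVersion = 1; // sensible default when talking to an older peer
        }
    }
}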
@@ -1136,8 +1113,8 @@ public IndexMetaData build() { final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); - return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), - tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, + return new IndexMetaData(new Index(index, uuid), version, mappingVersion, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), + tmpAliases.build(), customMetaData.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards, rolloverInfos.build()); } @@ -1145,6 +1122,7 @@ public static void toXContent(IndexMetaData indexMetaData, XContentBuilder build builder.startObject(indexMetaData.getIndex().getName()); builder.field(KEY_VERSION, indexMetaData.getVersion()); + builder.field(KEY_MAPPING_VERSION, indexMetaData.getMappingVersion()); builder.field(KEY_ROUTING_NUM_SHARDS, indexMetaData.getRoutingNumShards()); builder.field(KEY_STATE, indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH)); @@ -1164,10 +1142,9 @@ public static void toXContent(IndexMetaData indexMetaData, XContentBuilder build } builder.endArray(); - for (ObjectObjectCursor cursor : indexMetaData.getCustoms()) { - builder.startObject(cursor.key); - cursor.value.toXContent(builder, params); - builder.endObject(); + for (ObjectObjectCursor cursor : indexMetaData.customData) { + builder.field(cursor.key); + builder.map(cursor.value); } builder.startObject(KEY_ALIASES); @@ -1218,6 +1195,7 @@ public static IndexMetaData fromXContent(XContentParser parser) throws IOExcepti if (token != XContentParser.Token.START_OBJECT) { throw new IllegalArgumentException("expected object but got a " + token); } + boolean mappingVersion = false; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -1275,15 +1253,8 @@ public static IndexMetaData fromXContent(XContentParser parser) throws IOExcepti assert Version.CURRENT.major <= 5; parser.skipChildren(); } else { - // check if its a custom index metadata - Custom proto = lookupPrototype(currentFieldName); - if (proto == null) { - //TODO warn - parser.skipChildren(); - } else { - Custom custom = proto.fromXContent(parser); - builder.putCustom(custom.type(), custom); - } + // assume it's custom index metadata + builder.putCustom(currentFieldName, parser.mapStrings()); } } else if (token == XContentParser.Token.START_ARRAY) { if (KEY_MAPPINGS.equals(currentFieldName)) { @@ -1316,6 +1287,9 @@ public static IndexMetaData fromXContent(XContentParser parser) throws IOExcepti builder.state(State.fromString(parser.text())); } else if (KEY_VERSION.equals(currentFieldName)) { builder.version(parser.longValue()); + } else if (KEY_MAPPING_VERSION.equals(currentFieldName)) { + mappingVersion = true; + builder.mappingVersion(parser.longValue()); } else if (KEY_ROUTING_NUM_SHARDS.equals(currentFieldName)) { builder.setRoutingNumShards(parser.intValue()); } else { @@ -1325,6 +1299,9 @@ public static IndexMetaData fromXContent(XContentParser parser) throws IOExcepti throw new IllegalArgumentException("Unexpected token " + token); } } + if (Assertions.ENABLED && 
Version.indexCreated(builder.settings).onOrAfter(Version.V_6_5_0)) { + assert mappingVersion : "mapping version should be present for indices created on or after 6.5.0"; + } return builder.build(); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index d35a4baa1e680..7e2d92563035e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -20,7 +20,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; @@ -87,13 +87,10 @@ public class IndexTemplateMetaData extends AbstractDiffable aliases; - private final ImmutableOpenMap customs; - public IndexTemplateMetaData(String name, int order, Integer version, List patterns, Settings settings, ImmutableOpenMap mappings, - ImmutableOpenMap aliases, - ImmutableOpenMap customs) { + ImmutableOpenMap aliases) { if (patterns == null || patterns.isEmpty()) { throw new IllegalArgumentException("Index patterns must not be null or empty; got " + patterns); } @@ -104,7 +101,6 @@ public IndexTemplateMetaData(String name, int order, Integer version, this.settings = settings; this.mappings = mappings; this.aliases = aliases; - this.customs = customs; } public String name() { @@ -165,19 +161,6 @@ public ImmutableOpenMap getAliases() { return this.aliases; } - public ImmutableOpenMap customs() { - return this.customs; - } - - public ImmutableOpenMap getCustoms() { - return this.customs; - } - - @SuppressWarnings("unchecked") - public T custom(String type) { - return (T) customs.get(type); - } - public static Builder builder(String name) { return new Builder(name); } @@ -227,11 +210,13 @@ public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException AliasMetaData aliasMd = new AliasMetaData(in); builder.putAlias(aliasMd); } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); + if (in.getVersion().before(Version.V_6_5_0)) { + // Previously we allowed custom metadata + int customSize = in.readVInt(); + assert customSize == 0 : "expected no custom metadata"; + if (customSize > 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } builder.version(in.readOptionalVInt()); return builder.build(); @@ -260,10 +245,8 @@ public void writeTo(StreamOutput out) throws IOException { for (ObjectCursor cursor : aliases.values()) { cursor.value.writeTo(out); } - out.writeVInt(customs.size()); - for (ObjectObjectCursor cursor : customs) { - out.writeString(cursor.key); - cursor.value.writeTo(out); + if (out.getVersion().before(Version.V_6_5_0)) { + out.writeVInt(0); } out.writeOptionalVInt(version); } @@ -272,9 +255,6 @@ public static class Builder { private static final Set VALID_FIELDS = Sets.newHashSet( "template", "order", "mappings", "settings", "index_patterns", "aliases", "version"); - static { - VALID_FIELDS.addAll(IndexMetaData.customPrototypes.keySet()); - } private String name; @@ -290,13 +270,10 @@ public static 
class Builder { private final ImmutableOpenMap.Builder aliases; - private final ImmutableOpenMap.Builder customs; - public Builder(String name) { this.name = name; mappings = ImmutableOpenMap.builder(); aliases = ImmutableOpenMap.builder(); - customs = ImmutableOpenMap.builder(); } public Builder(IndexTemplateMetaData indexTemplateMetaData) { @@ -308,7 +285,6 @@ public Builder(IndexTemplateMetaData indexTemplateMetaData) { mappings = ImmutableOpenMap.builder(indexTemplateMetaData.mappings()); aliases = ImmutableOpenMap.builder(indexTemplateMetaData.aliases()); - customs = ImmutableOpenMap.builder(indexTemplateMetaData.customs()); } public Builder order(int order) { @@ -362,23 +338,8 @@ public Builder putAlias(AliasMetaData.Builder aliasMetaData) { return this; } - public Builder putCustom(String type, IndexMetaData.Custom customIndexMetaData) { - this.customs.put(type, customIndexMetaData); - return this; - } - - public Builder removeCustom(String type) { - this.customs.remove(type); - return this; - } - - public IndexMetaData.Custom getCustom(String type) { - return this.customs.get(type); - } - public IndexTemplateMetaData build() { - return new IndexTemplateMetaData(name, order, version, indexPatterns, settings, mappings.build(), - aliases.build(), customs.build()); + return new IndexTemplateMetaData(name, order, version, indexPatterns, settings, mappings.build(), aliases.build()); } public static void toXContent(IndexTemplateMetaData indexTemplateMetaData, XContentBuilder builder, ToXContent.Params params) @@ -425,12 +386,6 @@ public static void toInnerXContent(IndexTemplateMetaData indexTemplateMetaData, builder.endArray(); } - for (ObjectObjectCursor cursor : indexTemplateMetaData.customs()) { - builder.startObject(cursor.key); - cursor.value.toXContent(builder, params); - builder.endObject(); - } - builder.startObject("aliases"); for (ObjectCursor cursor : indexTemplateMetaData.aliases().values()) { AliasMetaData.Builder.toXContent(cursor.value, builder, params); @@ -468,15 +423,7 @@ public static IndexTemplateMetaData fromXContent(XContentParser parser, String t builder.putAlias(AliasMetaData.Builder.fromXContent(parser)); } } else { - // check if its a custom index metadata - IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(currentFieldName); - if (proto == null) { - //TODO warn - parser.skipChildren(); - } else { - IndexMetaData.Custom custom = proto.fromXContent(parser); - builder.putCustom(custom.type(), custom); - } + throw new ElasticsearchParseException("unknown key [{}] for index template", currentFieldName); } } else if (token == XContentParser.Token.START_ARRAY) { if ("mappings".equals(currentFieldName)) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index b19d65090c6b6..3fa77059ed065 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -38,7 +38,6 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.metadata.IndexMetaData.Custom; import org.elasticsearch.cluster.metadata.IndexMetaData.State; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -287,7 +286,7 @@ public 
ClusterState execute(ClusterState currentState) throws Exception { List templates = MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index()); - Map customs = new HashMap<>(); + Map> customs = new HashMap<>(); // add the request mapping Map> mappings = new HashMap<>(); @@ -300,10 +299,6 @@ public ClusterState execute(ClusterState currentState) throws Exception { mappings.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue())); } - for (Map.Entry entry : request.customs().entrySet()) { - customs.put(entry.getKey(), entry.getValue()); - } - final Index recoverFromIndex = request.recoverFrom(); if (recoverFromIndex == null) { @@ -320,18 +315,6 @@ public ClusterState execute(ClusterState currentState) throws Exception { MapperService.parseMapping(xContentRegistry, mappingString)); } } - // handle custom - for (ObjectObjectCursor cursor : template.customs()) { - String type = cursor.key; - IndexMetaData.Custom custom = cursor.value; - IndexMetaData.Custom existing = customs.get(type); - if (existing == null) { - customs.put(type, custom); - } else { - IndexMetaData.Custom merged = existing.mergeWith(custom); - customs.put(type, merged); - } - } //handle aliases for (ObjectObjectCursor cursor : template.aliases()) { AliasMetaData aliasMetaData = cursor.value; @@ -519,7 +502,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { indexMetaDataBuilder.putAlias(aliasMetaData); } - for (Map.Entry customEntry : customs.entrySet()) { + for (Map.Entry> customEntry : customs.entrySet()) { indexMetaDataBuilder.putCustom(customEntry.getKey(), customEntry.getValue()); } @@ -723,10 +706,7 @@ static void prepareResizeIndexSettings( .put(IndexMetaData.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getKey() + "_id", Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray())) // we only try once and then give up with a shrink index - .put("index.allocation.max_retries", 1) - // we add the legacy way of specifying it here for BWC. 
We can remove this once it's backported to 6.x - .put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), resizeSourceIndex.getName()) - .put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), resizeSourceIndex.getUUID()); + .put("index.allocation.max_retries", 1); } else if (type == ResizeType.SPLIT) { validateSplitIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build()); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 507eaf412d5fa..1baeb2459f09a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -179,9 +179,6 @@ public ClusterState execute(ClusterState currentState) throws Exception { .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build(); templateBuilder.putAlias(aliasMetaData); } - for (Map.Entry entry : request.customs.entrySet()) { - templateBuilder.putCustom(entry.getKey(), entry.getValue()); - } IndexTemplateMetaData template = templateBuilder.build(); MetaData.Builder builder = MetaData.builder(currentState.metaData()).put(template); @@ -339,7 +336,6 @@ public static class PutRequest { Settings settings = Settings.Builder.EMPTY_SETTINGS; Map mappings = new HashMap<>(); List aliases = new ArrayList<>(); - Map customs = new HashMap<>(); TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; @@ -378,11 +374,6 @@ public PutRequest aliases(Set aliases) { return this; } - public PutRequest customs(Map customs) { - this.customs.putAll(customs); - return this; - } - public PutRequest putMapping(String mappingType, String mappingSource) { mappings.put(mappingType, mappingSource); return this; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 82d947b4158a2..616fd13d1fadc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -287,6 +287,7 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt MetaData.Builder builder = MetaData.builder(metaData); boolean updated = false; for (IndexMetaData indexMetaData : updateList) { + boolean updatedMapping = false; // do the actual merge here on the master, and update the mapping source // we use the exact same indexService and metadata we used to validate above here to actually apply the update final Index index = indexMetaData.getIndex(); @@ -303,7 +304,7 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt if (existingSource.equals(updatedSource)) { // same source, no changes, ignore it } else { - updated = true; + updatedMapping = true; // use the merged mapping source if (logger.isDebugEnabled()) { logger.debug("{} update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource); @@ -313,7 +314,7 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt } } else { - updated = true; + updatedMapping = true; if (logger.isDebugEnabled()) { logger.debug("{} create_mapping [{}] with source [{}]", index, mappingType, updatedSource); } else if (logger.isInfoEnabled()) { @@ -329,7 +330,16 
@@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource())); } } + if (updatedMapping) { + indexMetaDataBuilder.mappingVersion(1 + indexMetaDataBuilder.mappingVersion()); + } + /* + * This implicitly increments the index metadata version and builds the index metadata. This means that we need to have + * already incremented the mapping version if necessary. Therefore, the mapping version increment must remain before this + * statement. + */ builder.put(indexMetaDataBuilder); + updated |= updatedMapping; } if (updated) { return ClusterState.builder(currentState).metaData(builder).build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java index 153fc2cbe3e7d..8b97f1357fa00 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.Nullable; @@ -82,11 +81,7 @@ public NodeAllocationResult(DiscoveryNode node, Decision decision, int weightRan public NodeAllocationResult(StreamInput in) throws IOException { node = new DiscoveryNode(in); shardStoreInfo = in.readOptionalWriteable(ShardStoreInfo::new); - if (in.getVersion().before(Version.V_5_2_1)) { - canAllocateDecision = Decision.readFrom(in); - } else { - canAllocateDecision = in.readOptionalWriteable(Decision::readFrom); - } + canAllocateDecision = in.readOptionalWriteable(Decision::readFrom); nodeDecision = AllocationDecision.readFrom(in); weightRanking = in.readVInt(); } @@ -95,15 +90,7 @@ public NodeAllocationResult(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { node.writeTo(out); out.writeOptionalWriteable(shardStoreInfo); - if (out.getVersion().before(Version.V_5_2_1)) { - if (canAllocateDecision == null) { - Decision.NO.writeTo(out); - } else { - canAllocateDecision.writeTo(out); - } - } else { - out.writeOptionalWriteable(canAllocateDecision); - } + out.writeOptionalWriteable(canAllocateDecision); nodeDecision.writeTo(out); out.writeVInt(weightRanking); } diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java index 0ee8d095f49a6..bf65162d215b9 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java @@ -25,6 +25,8 @@ import java.util.ArrayList; import java.util.Collection; +import static org.apache.lucene.geo.GeoUtils.MAX_LAT_INCL; + /** * Utilities for converting to/from the GeoHash standard * @@ -48,6 +50,8 @@ public class GeoHashUtils { private static final double LAT_SCALE = (0x1L<>>= 4; - // deinterleave and add 1 to lat and lon to get topRight - long lat = BitUtil.deinterleave(ghLong >>> 1) + 1; - long lon = BitUtil.deinterleave(ghLong) + 1; - GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)lon, (int)lat) << 4 | len); - - return new Rectangle(bottomLeft.lat(), topRight.lat(), 
bottomLeft.lon(), topRight.lon()); + // deinterleave + long lon = BitUtil.deinterleave(ghLong >>> 1); + long lat = BitUtil.deinterleave(ghLong); + if (lat < MAX_LAT_BITS) { + // add 1 to lat and lon to get topRight + GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)(lat + 1), (int)(lon + 1)) << 4 | len); + return new Rectangle(bottomLeft.lat(), topRight.lat(), bottomLeft.lon(), topRight.lon()); + } else { + // We cannot go north of the north pole, so instead of calculating the latitude of topRight we use 90 degrees; + // we still add 1 to lon to get the lon of topRight + GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)lat, (int)(lon + 1)) << 4 | len); + return new Rectangle(bottomLeft.lat(), MAX_LAT_INCL, bottomLeft.lon(), topRight.lon()); + } } /** diff --git a/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java b/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java index cc354145b11bb..dd5d98dfabbd0 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java +++ b/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java @@ -16,6 +16,8 @@ package org.elasticsearch.common.inject.matcher; +import org.elasticsearch.common.SuppressForbidden; + import java.lang.annotation.Annotation; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -327,7 +329,9 @@ public String toString() { return "inPackage(" + targetPackage.getName() + ")"; } + @SuppressForbidden(reason = "ClassLoader.getDefinedPackage not available yet") public Object readResolve() { + // TODO minJava >= 9 : use ClassLoader.getDefinedPackage and remove @SuppressForbidden return inPackage(Package.getPackage(packageName)); } } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 90420369513ae..1c1e56878932d 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -98,14 +98,14 @@ public class Lucene { assert annotation == null : "DocValuesFormat " + LATEST_DOC_VALUES_FORMAT + " is deprecated" ; } - public static final String SOFT_DELETE_FIELD = "__soft_delete"; + public static final String SOFT_DELETES_FIELD = "__soft_deletes"; public static final NamedAnalyzer STANDARD_ANALYZER = new NamedAnalyzer("_standard", AnalyzerScope.GLOBAL, new StandardAnalyzer()); public static final NamedAnalyzer KEYWORD_ANALYZER = new NamedAnalyzer("_keyword", AnalyzerScope.GLOBAL, new KeywordAnalyzer()); public static final ScoreDoc[] EMPTY_SCORE_DOCS = new ScoreDoc[0]; - public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, 0.0f); + public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, Float.NaN); public static Version parseVersion(@Nullable String version, Version defaultVersion, Logger logger) { if (version == null) { @@ -201,7 +201,7 @@ public static SegmentInfos pruneUnreferencedFiles(String segmentsFileName, Direc } final CommitPoint cp = new CommitPoint(si, directory); try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) - .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setIndexCommit(cp) .setCommitOnClose(false) .setMergePolicy(NoMergePolicy.INSTANCE) @@ -225,7 +225,7 @@ public static void cleanLuceneIndex(Directory directory) throws IOException { } } try
(IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) - .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setMergePolicy(NoMergePolicy.INSTANCE) // no merges .setCommitOnClose(false) // no commits .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append... @@ -884,7 +884,7 @@ public LeafReader wrap(LeafReader leaf) { if (hardLiveDocs == null) { return new LeafReaderWithLiveDocs(leaf, null, leaf.maxDoc()); } - // TODO: Avoid recalculate numDocs everytime. + // TODO: Can we avoid calculating numDocs by using SegmentReader#getSegmentInfo with LUCENE-8458? int numDocs = 0; for (int i = 0; i < hardLiveDocs.length(); i++) { if (hardLiveDocs.get(i)) { @@ -910,7 +910,7 @@ public CacheHelper getReaderCacheHelper() { /** * Returns a numeric docvalues which can be used to soft-delete documents. */ - public static NumericDocValuesField newSoftDeleteField() { - return new NumericDocValuesField(SOFT_DELETE_FIELD, 1); + public static NumericDocValuesField newSoftDeletesField() { + return new NumericDocValuesField(SOFT_DELETES_FIELD, 1); } } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index 28be480ad741c..3a037bed62b7f 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -70,7 +70,7 @@ final class PerThreadIDVersionAndSeqNoLookup { final Terms terms = reader.terms(uidField); if (terms == null) { // If a segment contains only no-ops, it does not have _uid but has both _soft_deletes and _tombstone fields. - final NumericDocValues softDeletesDV = reader.getNumericDocValues(Lucene.SOFT_DELETE_FIELD); + final NumericDocValues softDeletesDV = reader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD); final NumericDocValues tombstoneDV = reader.getNumericDocValues(SeqNoFieldMapper.TOMBSTONE_NAME); if (softDeletesDV == null || tombstoneDV == null) { throw new IllegalArgumentException("reader does not have _uid terms but not a no-op segment; " + diff --git a/server/src/main/java/org/elasticsearch/common/path/PathTrie.java b/server/src/main/java/org/elasticsearch/common/path/PathTrie.java index 5243809c64a1b..08787cea9df73 100644 --- a/server/src/main/java/org/elasticsearch/common/path/PathTrie.java +++ b/server/src/main/java/org/elasticsearch/common/path/PathTrie.java @@ -104,24 +104,12 @@ public void updateKeyWithNamedWildcard(String key) { namedWildcard = key.substring(key.indexOf('{') + 1, key.indexOf('}')); } - public boolean isWildcard() { - return isWildcard; - } - - public synchronized void addChild(TrieNode child) { - addInnerChild(child.key, child); - } - private void addInnerChild(String key, TrieNode child) { Map newChildren = new HashMap<>(children); newChildren.put(key, child); children = unmodifiableMap(newChildren); } - public TrieNode getChild(String key) { - return children.get(key); - } - public synchronized void insert(String[] path, int index, T value) { if (index >= path.length) return; @@ -302,7 +290,7 @@ public void insert(String path, T value) { } int index = 0; // Supports initial delimiter.
- if (strings.length > 0 && strings[0].isEmpty()) { + if (strings[0].isEmpty()) { index = 1; } root.insert(strings, index, value); @@ -327,7 +315,7 @@ public void insertOrUpdate(String path, T value, BiFunction updater) { } int index = 0; // Supports initial delimiter. - if (strings.length > 0 && strings[0].isEmpty()) { + if (strings[0].isEmpty()) { index = 1; } root.insertOrUpdate(strings, index, value, updater); @@ -352,7 +340,7 @@ public T retrieve(String path, Map params, TrieMatchingMode trie int index = 0; // Supports initial delimiter. - if (strings.length > 0 && strings[0].isEmpty()) { + if (strings[0].isEmpty()) { index = 1; } diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 8847c8138a706..9f1d7d8a39530 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -199,7 +199,7 @@ public synchronized void addSettingsUpdateConsumer(Setting setting, Consu * Also automatically adds empty consumers for all settings in order to activate logging */ public synchronized void addSettingsUpdateConsumer(Consumer consumer, List> settings) { - addSettingsUpdater(Setting.groupedSettingsUpdater(consumer, logger, settings)); + addSettingsUpdater(Setting.groupedSettingsUpdater(consumer, settings)); } /** @@ -208,11 +208,78 @@ public synchronized void addSettingsUpdateConsumer(Consumer consumer, */ public synchronized void addAffixUpdateConsumer(Setting.AffixSetting setting, BiConsumer consumer, BiConsumer validator) { + ensureSettingIsRegistered(setting); + addSettingsUpdater(setting.newAffixUpdater(consumer, logger, validator)); + } + + /** + * Adds an affix settings consumer that accepts the values for two settings. The consumer is only notified if one or both settings change + * and if the provided validator succeeds. +

+ * Note: Only settings registered in {@link SettingsModule} can be changed dynamically. + *

+ * This method registers a compound updater that is useful if two settings are depending on each other. + * The consumer is always provided with both values even if only one of the two changes. + */ + public synchronized void addAffixUpdateConsumer(Setting.AffixSetting settingA, Setting.AffixSetting settingB, + BiConsumer> consumer, + BiConsumer> validator) { + // it would be awesome to have a generic way to do that ie. a set of settings that map to an object with a builder + // down the road this would be nice to have! + ensureSettingIsRegistered(settingA); + ensureSettingIsRegistered(settingB); + SettingUpdater, A>> affixUpdaterA = settingA.newAffixUpdater((a,b)-> {}, logger, (a,b)-> {}); + SettingUpdater, B>> affixUpdaterB = settingB.newAffixUpdater((a,b)-> {}, logger, (a,b)-> {}); + + addSettingsUpdater(new SettingUpdater>>() { + + @Override + public boolean hasChanged(Settings current, Settings previous) { + return affixUpdaterA.hasChanged(current, previous) || affixUpdaterB.hasChanged(current, previous); + } + + @Override + public Map> getValue(Settings current, Settings previous) { + Map> map = new HashMap<>(); + BiConsumer aConsumer = (key, value) -> { + assert map.containsKey(key) == false : "duplicate key: " + key; + map.put(key, new Tuple<>(value, settingB.getConcreteSettingForNamespace(key).get(current))); + }; + BiConsumer bConsumer = (key, value) -> { + Tuple abTuple = map.get(key); + if (abTuple != null) { + map.put(key, new Tuple<>(abTuple.v1(), value)); + } else { + assert settingA.getConcreteSettingForNamespace(key).get(current).equals(settingA.getConcreteSettingForNamespace + (key).get(previous)) : "expected: " + settingA.getConcreteSettingForNamespace(key).get(current) + + " but was " + settingA.getConcreteSettingForNamespace(key).get(previous); + map.put(key, new Tuple<>(settingA.getConcreteSettingForNamespace(key).get(current), value)); + } + }; + SettingUpdater, A>> affixUpdaterA = settingA.newAffixUpdater(aConsumer, logger, (a,b) ->{}); + SettingUpdater, B>> affixUpdaterB = settingB.newAffixUpdater(bConsumer, logger, (a,b) ->{}); + affixUpdaterA.apply(current, previous); + affixUpdaterB.apply(current, previous); + for (Map.Entry> entry : map.entrySet()) { + validator.accept(entry.getKey(), entry.getValue()); + } + return Collections.unmodifiableMap(map); + } + + @Override + public void apply(Map> values, Settings current, Settings previous) { + for (Map.Entry> entry : values.entrySet()) { + consumer.accept(entry.getKey(), entry.getValue()); + } + } + }); + } + + private void ensureSettingIsRegistered(Setting.AffixSetting setting) { final Setting registeredSetting = this.complexMatchers.get(setting.getKey()); if (setting != registeredSetting) { throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]"); } - addSettingsUpdater(setting.newAffixUpdater(consumer, logger, validator)); } /** diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index de8691b3b6871..237fc911db627 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -63,6 +63,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import 
org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.indices.IndicesQueryCache; @@ -264,12 +265,14 @@ public void apply(Settings value, Settings current, Settings previous) { HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING, HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING, + IndexModule.NODE_STORE_ALLOW_MMAPFS, ClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, + RemoteClusterAware.REMOTE_CLUSTERS_PROXY, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE, RemoteClusterService.REMOTE_CONNECTIONS_PER_CLUSTER, RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 57d176ba41435..f3de294046c42 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -204,8 +204,10 @@ public boolean isPrivateSetting(String key) { case IndexMetaData.SETTING_VERSION_UPGRADED: case IndexMetaData.SETTING_INDEX_PROVIDED_NAME: case MergePolicyConfig.INDEX_MERGE_ENABLED: - case IndexMetaData.INDEX_SHRINK_SOURCE_UUID_KEY: - case IndexMetaData.INDEX_SHRINK_SOURCE_NAME_KEY: + // we keep the shrink settings for BWC - this can be removed in 8.0 + // we can't remove in 7 since this setting might be baked into an index coming in via a full cluster restart from 6.0 + case "index.shrink.source.uuid": + case "index.shrink.source.name": case IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY: case IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY: return true; diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 94edb5a297afa..7b432c0ed1e18 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -547,7 +547,7 @@ public String toString() { }; } - static AbstractScopedSettings.SettingUpdater groupedSettingsUpdater(Consumer consumer, Logger logger, + static AbstractScopedSettings.SettingUpdater groupedSettingsUpdater(Consumer consumer, final List> configuredSettings) { return new AbstractScopedSettings.SettingUpdater() { @@ -1009,6 +1009,10 @@ public static Setting simpleString(String key, Property... properties) { return new Setting<>(key, s -> "", Function.identity(), properties); } + public static Setting simpleString(String key, Function parser, Property... properties) { + return new Setting<>(key, s -> "", parser, properties); + } + public static Setting simpleString(String key, Setting fallback, Property... 
properties) { return new Setting<>(key, fallback, Function.identity(), properties); } diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index 37efff5a0bebd..4017e43b071fd 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -48,6 +48,7 @@ import static java.time.temporal.ChronoField.MILLI_OF_SECOND; import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; import static java.time.temporal.ChronoField.MONTH_OF_YEAR; +import static java.time.temporal.ChronoField.NANO_OF_SECOND; import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; public class DateFormatters { @@ -81,7 +82,7 @@ public class DateFormatters { .appendFraction(MILLI_OF_SECOND, 3, 3, true) .optionalEnd() .optionalStart() - .appendOffset("+HHmm", "Z") + .appendZoneOrOffsetId() .optionalEnd() .optionalEnd() .toFormatter(Locale.ROOT); @@ -95,7 +96,7 @@ public class DateFormatters { .appendFraction(MILLI_OF_SECOND, 3, 3, true) .optionalEnd() .optionalStart() - .appendZoneOrOffsetId() + .appendOffset("+HHmm", "Z") .optionalEnd() .optionalEnd() .toFormatter(Locale.ROOT); @@ -106,6 +107,40 @@ public class DateFormatters { private static final CompoundDateTimeFormatter STRICT_DATE_OPTIONAL_TIME = new CompoundDateTimeFormatter(STRICT_DATE_OPTIONAL_TIME_FORMATTER_1, STRICT_DATE_OPTIONAL_TIME_FORMATTER_2); + private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS_1 = new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .optionalStart() + .appendLiteral('T') + .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 3, 9, true) + .optionalEnd() + .optionalStart() + .appendZoneOrOffsetId() + .optionalEnd() + .optionalEnd() + .toFormatter(Locale.ROOT); + + private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS_2 = new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .optionalStart() + .appendLiteral('T') + .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 3, 9, true) + .optionalEnd() + .optionalStart() + .appendOffset("+HHmm", "Z") + .optionalEnd() + .optionalEnd() + .toFormatter(Locale.ROOT); + + /** + * Returns a generic ISO datetime parser where the date is mandatory and the time is optional with nanosecond resolution. 
+ */ + private static final CompoundDateTimeFormatter STRICT_DATE_OPTIONAL_TIME_NANOS = + new CompoundDateTimeFormatter(STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS_1, STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS_2); + ///////////////////////////////////////// // // BEGIN basic time formatters @@ -1326,6 +1361,8 @@ public static CompoundDateTimeFormatter forPattern(String input, Locale locale) return STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS; } else if ("strictDateOptionalTime".equals(input) || "strict_date_optional_time".equals(input)) { return STRICT_DATE_OPTIONAL_TIME; + } else if ("strictDateOptionalTimeNanos".equals(input) || "strict_date_optional_time_nanos".equals(input)) { + return STRICT_DATE_OPTIONAL_TIME_NANOS; } else if ("strictDateTime".equals(input) || "strict_date_time".equals(input)) { return STRICT_DATE_TIME; } else if ("strictDateTimeNoMillis".equals(input) || "strict_date_time_no_millis".equals(input)) { diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index 057a970470b15..d38eb03fae3dd 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -160,11 +160,13 @@ public static String threadName(Settings settings, String namePrefix) { if (Node.NODE_NAME_SETTING.exists(settings)) { return threadName(Node.NODE_NAME_SETTING.get(settings), namePrefix); } else { + // TODO this should only be allowed in tests return threadName("", namePrefix); } } public static String threadName(final String nodeName, final String namePrefix) { + // TODO missing node names should only be allowed in tests return "elasticsearch" + (nodeName.isEmpty() ? "" : "[") + nodeName + (nodeName.isEmpty() ? 
"" : "]") + "[" + namePrefix + "]"; } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java index 38abe90ad46dc..5793bcf8a0e49 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java @@ -21,6 +21,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.time.CompoundDateTimeFormatter; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.joda.time.DateTime; @@ -33,6 +35,19 @@ import org.joda.time.tz.CachedDateTimeZone; import org.joda.time.tz.FixedDateTimeZone; +import java.time.DayOfWeek; +import java.time.Duration; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.Month; +import java.time.MonthDay; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.Period; +import java.time.Year; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.Calendar; import java.util.Date; import java.util.GregorianCalendar; @@ -49,6 +64,9 @@ public class XContentElasticsearchExtension implements XContentBuilderExtension { public static final DateTimeFormatter DEFAULT_DATE_PRINTER = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC); + public static final CompoundDateTimeFormatter DEFAULT_FORMATTER = DateFormatters.forPattern("strict_date_optional_time_nanos"); + public static final CompoundDateTimeFormatter LOCAL_TIME_FORMATTER = DateFormatters.forPattern("HH:mm:ss.SSS"); + public static final CompoundDateTimeFormatter OFFSET_TIME_FORMATTER = DateFormatters.forPattern("HH:mm:ss.SSSZZZZZ"); @Override public Map, XContentBuilder.Writer> getXContentWriters() { @@ -62,6 +80,19 @@ public Map, XContentBuilder.Writer> getXContentWriters() { writers.put(MutableDateTime.class, XContentBuilder::timeValue); writers.put(DateTime.class, XContentBuilder::timeValue); writers.put(TimeValue.class, (b, v) -> b.value(v.toString())); + writers.put(ZonedDateTime.class, XContentBuilder::timeValue); + writers.put(OffsetDateTime.class, XContentBuilder::timeValue); + writers.put(OffsetTime.class, XContentBuilder::timeValue); + writers.put(java.time.Instant.class, XContentBuilder::timeValue); + writers.put(LocalDateTime.class, XContentBuilder::timeValue); + writers.put(LocalDate.class, XContentBuilder::timeValue); + writers.put(LocalTime.class, XContentBuilder::timeValue); + writers.put(DayOfWeek.class, (b, v) -> b.value(v.toString())); + writers.put(Month.class, (b, v) -> b.value(v.toString())); + writers.put(MonthDay.class, (b, v) -> b.value(v.toString())); + writers.put(Year.class, (b, v) -> b.value(v.toString())); + writers.put(Duration.class, (b, v) -> b.value(v.toString())); + writers.put(Period.class, (b, v) -> b.value(v.toString())); writers.put(BytesReference.class, (b, v) -> { if (v == null) { @@ -102,6 +133,14 @@ public Map, Function> getDateTransformers() { transformers.put(Calendar.class, d -> DEFAULT_DATE_PRINTER.print(((Calendar) d).getTimeInMillis())); transformers.put(GregorianCalendar.class, d -> DEFAULT_DATE_PRINTER.print(((Calendar) d).getTimeInMillis())); transformers.put(Instant.class, d -> DEFAULT_DATE_PRINTER.print((Instant) d)); 
+ transformers.put(ZonedDateTime.class, d -> DEFAULT_FORMATTER.format((ZonedDateTime) d)); + transformers.put(OffsetDateTime.class, d -> DEFAULT_FORMATTER.format((OffsetDateTime) d)); + transformers.put(OffsetTime.class, d -> OFFSET_TIME_FORMATTER.format((OffsetTime) d)); + transformers.put(LocalDateTime.class, d -> DEFAULT_FORMATTER.format((LocalDateTime) d)); + transformers.put(java.time.Instant.class, + d -> DEFAULT_FORMATTER.format(ZonedDateTime.ofInstant((java.time.Instant) d, ZoneOffset.UTC))); + transformers.put(LocalDate.class, d -> ((LocalDate) d).toString()); + transformers.put(LocalTime.class, d -> LOCAL_TIME_FORMATTER.format((LocalTime) d)); return transformers; } } diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index e47fe7a7a70ed..f34798605d784 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.single.SingleNodeDiscovery; +import org.elasticsearch.discovery.zen.FileBasedUnicastHostsProvider; import org.elasticsearch.discovery.zen.SettingsBasedHostsProvider; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.discovery.zen.ZenDiscovery; @@ -40,6 +41,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -69,10 +71,11 @@ public class DiscoveryModule { public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, MasterService masterService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, List plugins, - AllocationService allocationService) { + AllocationService allocationService, Path configFile) { final Collection> joinValidators = new ArrayList<>(); final Map> hostProviders = new HashMap<>(); hostProviders.put("settings", () -> new SettingsBasedHostsProvider(settings, transportService)); + hostProviders.put("file", () -> new FileBasedUnicastHostsProvider(settings, configFile)); for (DiscoveryPlugin plugin : plugins) { plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> { if (hostProviders.put(entry.getKey(), entry.getValue()) != null) { diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java new file mode 100644 index 0000000000000..f339ae43a703e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.zen; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * An implementation of {@link UnicastHostsProvider} that reads hosts/ports + * from {@link #UNICAST_HOSTS_FILE}. + * + * Each unicast host/port that is part of the discovery process must be listed on + * a separate line. If the port is left off an entry, a default port of 9300 is + * assumed. An example unicast hosts file could read: + * + * 67.81.244.10 + * 67.81.244.11:9305 + * 67.81.244.15:9400 + */ +public class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { + + public static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt"; + + private final Path unicastHostsFilePath; + private final Path legacyUnicastHostsFilePath; + + public FileBasedUnicastHostsProvider(Settings settings, Path configFile) { + super(settings); + this.unicastHostsFilePath = configFile.resolve(UNICAST_HOSTS_FILE); + this.legacyUnicastHostsFilePath = configFile.resolve("discovery-file").resolve(UNICAST_HOSTS_FILE); + } + + private List getHostsList() { + if (Files.exists(unicastHostsFilePath)) { + return readFileContents(unicastHostsFilePath); + } + + if (Files.exists(legacyUnicastHostsFilePath)) { + deprecationLogger.deprecated("Found dynamic hosts list at [{}] but this path is deprecated. This list should be at [{}] " + + "instead. 
Support for the deprecated path will be removed in future.", legacyUnicastHostsFilePath, unicastHostsFilePath); + return readFileContents(legacyUnicastHostsFilePath); + } + + logger.warn("expected, but did not find, a dynamic hosts list at [{}]", unicastHostsFilePath); + + return Collections.emptyList(); + } + + private List readFileContents(Path path) { + try (Stream lines = Files.lines(path)) { + return lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments + .collect(Collectors.toList()); + } catch (IOException e) { + logger.warn(() -> new ParameterizedMessage("failed to read file [{}]", unicastHostsFilePath), e); + return Collections.emptyList(); + } + } + + @Override + public List buildDynamicHosts(HostsResolver hostsResolver) { + final List transportAddresses = hostsResolver.resolveHosts(getHostsList(), 1); + logger.debug("seed addresses: {}", transportAddresses); + return transportAddresses; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index 715b78b14ffdb..7f2eae492fd56 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -21,10 +21,11 @@ import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.store.MMapDirectory; +import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; @@ -59,7 +60,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -84,8 +84,10 @@ */ public final class IndexModule { + public static final Setting NODE_STORE_ALLOW_MMAPFS = Setting.boolSetting("node.store.allow_mmapfs", true, Property.NodeScope); + public static final Setting INDEX_STORE_TYPE_SETTING = - new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope); + new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope); /** On which extensions to load data into the file-system cache upon opening of files. 
* This only works with the mmap directory, and even in that case is still @@ -289,7 +291,7 @@ IndexEventListener freeze() { // pkg private for testing } } - private static boolean isBuiltinType(String storeType) { + public static boolean isBuiltinType(String storeType) { for (Type type : Type.values()) { if (type.match(storeType)) { return true; @@ -298,21 +300,48 @@ private static boolean isBuiltinType(String storeType) { return false; } + public enum Type { - NIOFS, - MMAPFS, - SIMPLEFS, - FS; + NIOFS("niofs"), + MMAPFS("mmapfs"), + SIMPLEFS("simplefs"), + FS("fs"); + + private final String settingsKey; + + Type(final String settingsKey) { + this.settingsKey = settingsKey; + } + + private static final Map TYPES; + + static { + final Map types = new HashMap<>(4); + for (final Type type : values()) { + types.put(type.settingsKey, type); + } + TYPES = Collections.unmodifiableMap(types); + } public String getSettingsKey() { - return this.name().toLowerCase(Locale.ROOT); + return this.settingsKey; + } + + public static Type fromSettingsKey(final String key) { + final Type type = TYPES.get(key); + if (type == null) { + throw new IllegalArgumentException("no matching type for [" + key + "]"); + } + return type; } + /** * Returns true iff this settings matches the type. */ public boolean match(String setting) { return getSettingsKey().equals(setting); } + } /** @@ -325,6 +354,16 @@ public interface IndexSearcherWrapperFactory { IndexSearcherWrapper newWrapper(IndexService indexService); } + public static Type defaultStoreType(final boolean allowMmapfs) { + if (allowMmapfs && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) { + return Type.MMAPFS; + } else if (Constants.WINDOWS) { + return Type.SIMPLEFS; + } else { + return Type.NIOFS; + } + } + public IndexService newIndexService( NodeEnvironment environment, NamedXContentRegistry xContentRegistry, @@ -343,20 +382,7 @@ public IndexService newIndexService( IndexSearcherWrapperFactory searcherWrapperFactory = indexSearcherWrapper.get() == null ? 
(shard) -> null : indexSearcherWrapper.get(); eventListener.beforeIndexCreated(indexSettings.getIndex(), indexSettings.getSettings()); - final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING); - final IndexStore store; - if (Strings.isEmpty(storeType) || isBuiltinType(storeType)) { - store = new IndexStore(indexSettings); - } else { - Function factory = indexStoreFactories.get(storeType); - if (factory == null) { - throw new IllegalArgumentException("Unknown store type [" + storeType + "]"); - } - store = factory.apply(indexSettings); - if (store == null) { - throw new IllegalStateException("store must not be null"); - } - } + final IndexStore store = getIndexStore(indexSettings, indexStoreFactories); final QueryCache queryCache; if (indexSettings.getValue(INDEX_QUERY_CACHE_ENABLED_SETTING)) { BiFunction queryCacheProvider = forceQueryCacheProvider.get(); @@ -375,6 +401,39 @@ public IndexService newIndexService( indicesFieldDataCache, searchOperationListeners, indexOperationListeners, namedWriteableRegistry); } + private static IndexStore getIndexStore( + final IndexSettings indexSettings, final Map> indexStoreFactories) { + final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING); + final Type type; + final Boolean allowMmapfs = NODE_STORE_ALLOW_MMAPFS.get(indexSettings.getNodeSettings()); + if (storeType.isEmpty() || Type.FS.getSettingsKey().equals(storeType)) { + type = defaultStoreType(allowMmapfs); + } else { + if (isBuiltinType(storeType)) { + type = Type.fromSettingsKey(storeType); + } else { + type = null; + } + } + if (type != null && type == Type.MMAPFS && allowMmapfs == false) { + throw new IllegalArgumentException("store type [mmapfs] is not allowed"); + } + final IndexStore store; + if (storeType.isEmpty() || isBuiltinType(storeType)) { + store = new IndexStore(indexSettings); + } else { + Function factory = indexStoreFactories.get(storeType); + if (factory == null) { + throw new IllegalArgumentException("Unknown store type [" + storeType + "]"); + } + store = factory.apply(indexSettings); + if (store == null) { + throw new IllegalStateException("store must not be null"); + } + } + return store; + } + /** * creates a new mapper service to do administrative work like mapping updates. This *should not* be used for document parsing. * doing so will result in an exception. 
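With the IndexModule changes above, the effective store type is now resolved from the node-level node.store.allow_mmapfs setting together with the per-index index.store.type setting. A small illustration of the resulting behaviour, based only on the logic shown above; the example class and the expected values noted in the comments are assumptions, not part of the change:

    // Illustration only: how the new store-type resolution is expected to behave.
    import org.elasticsearch.index.IndexModule;

    final class StoreTypeExample { // hypothetical, for illustration only
        public static void main(String[] args) {
            // On a 64-bit JRE where MMapDirectory.UNMAP_SUPPORTED is true (assumption):
            IndexModule.Type withMmap = IndexModule.defaultStoreType(true);  // expected MMAPFS
            IndexModule.Type noMmap = IndexModule.defaultStoreType(false);   // expected NIOFS (SIMPLEFS on Windows)
            // "mmapfs" still parses as a built-in type; getIndexStore(...) above rejects it
            // with "store type [mmapfs] is not allowed" when node.store.allow_mmapfs is false.
            IndexModule.Type parsed = IndexModule.Type.fromSettingsKey("mmapfs");
            System.out.println(withMmap + " " + noMmap + " " + parsed);
        }
    }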
diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 5e9e811bc32ec..6ffbc44676e0b 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -522,8 +522,8 @@ List getSearchOperationListener() { // pkg private for } @Override - public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { - return mapperService().updateMapping(indexMetaData); + public boolean updateMapping(final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData) throws IOException { + return mapperService().updateMapping(currentIndexMetaData, newIndexMetaData); } private class StoreCloseListener implements Store.OnClose { diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index f6cb58a0676fa..6a612091b9796 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -75,11 +75,10 @@ public final class IndexSettings { switch(s) { case "false": case "true": - case "fix": case "checksum": return s; default: - throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, fix, checksum] but was: " + s); + throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, checksum] but was: " + s); } }, Property.IndexScope); @@ -242,7 +241,7 @@ public final class IndexSettings { * Specifies if the index should use soft-delete instead of hard-delete for update/delete operations. */ public static final Setting INDEX_SOFT_DELETES_SETTING = - Setting.boolSetting("index.soft_deletes.enabled", true, Property.IndexScope); + Setting.boolSetting("index.soft_deletes.enabled", false, Property.IndexScope, Property.Final); /** * Controls how many soft-deleted documents will be kept around before being merged away. Keeping more deleted diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index edf00e63e49cf..08724d6e79426 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -593,36 +593,33 @@ public enum SearcherScope { public abstract Closeable acquireRetentionLockForPeerRecovery(); /** - * Creates a new translog snapshot from this engine for reading translog operations whose seq# in the provided range. - * The caller has to close the returned snapshot after finishing the reading. + * Creates a new history snapshot from Lucene for reading operations whose seqno in the requesting seqno range (both inclusive) */ - public abstract Translog.Snapshot newSnapshotFromMinSeqNo(long minSeqNo) throws IOException; - - public abstract TranslogStats getTranslogStats(); + public abstract Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService, + long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException; /** - * Returns the last location that the translog of this engine has written into. + * Creates a new history snapshot for reading operations since {@code startingSeqNo} (inclusive). + * The returned snapshot can be retrieved from either Lucene index or translog files. 
*/ - public abstract Translog.Location getTranslogLastWriteLocation(); + public abstract Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException; /** - * Creates a new "translog" snapshot from Lucene for reading operations whose seqno in the requesting seqno range + * Returns the estimated number of history operations whose seq# at least {@code startingSeqNo}(inclusive) in this engine. */ - public abstract Translog.Snapshot newLuceneChangesSnapshot(String source, MapperService mapperService, - long minSeqNo, long maxSeqNo, boolean requiredFullRange) throws IOException; + public abstract int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException; /** - * Creates a new history snapshot for reading operations since the provided seqno. - * The returned snapshot can be retrieved from either Lucene index or translog files. + * Checks if this engine has every operations since {@code startingSeqNo}(inclusive) in its history (either Lucene or translog) */ - public abstract Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException; + public abstract boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException; + + public abstract TranslogStats getTranslogStats(); /** - * Returns the estimated number of history operations whose seq# at least the provided seq# in this engine. + * Returns the last location that the translog of this engine has written into. */ - public abstract int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException; - - public abstract boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException; + public abstract Translog.Location getTranslogLastWriteLocation(); protected final void ensureOpen(Exception suppressed) { if (isClosed.get()) { @@ -1642,10 +1639,12 @@ public interface Warmer { public abstract int fillSeqNoGaps(long primaryTerm) throws IOException; /** - * Performs recovery from the transaction log. + * Performs recovery from the transaction log up to {@code recoverUpToSeqNo} (inclusive). * This operation will close the engine if the recovery fails. + * + * @param recoverUpToSeqNo the upper bound, inclusive, of sequence number to be recovered */ - public abstract Engine recoverFromTranslog() throws IOException; + public abstract Engine recoverFromTranslog(long recoverUpToSeqNo) throws IOException; /** * Do not replay translog operations, but make the engine be ready. 
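The `Engine` API changes above replace the unbounded `recoverFromTranslog()` with a sequence-number-bounded variant. As a rough illustration of what that contract implies (a conceptual sketch, not the real `Engine`/`Translog` code), the example below replays only operations at or below the requested bound and skips the rest:

```java
// Conceptual sketch of a seqno-bounded replay: operations above the requested upper bound
// are skipped rather than applied, so recovery stops at recoverUpToSeqNo (inclusive).
// BoundedReplay and Op are hypothetical names, not the real Engine/Translog types.
import java.util.Iterator;

final class BoundedReplay {

    static final class Op {
        final long seqNo;
        Op(long seqNo) { this.seqNo = seqNo; }
    }

    /** Replays every operation whose seqNo is no greater than recoverUpToSeqNo; returns the count replayed. */
    static int replay(Iterator<Op> historySnapshot, long recoverUpToSeqNo) {
        int replayed = 0;
        while (historySnapshot.hasNext()) {
            Op op = historySnapshot.next();
            if (op.seqNo > recoverUpToSeqNo) {
                continue; // outside the requested range; left for a later recovery phase
            }
            apply(op);
            replayed++;
        }
        return replayed;
    }

    private static void apply(Op op) {
        // an index, delete, or no-op would be replayed into the engine here
    }
}
```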
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 0c04f70d436e7..00d1f67f01bd9 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -149,7 +149,7 @@ public class InternalEngine extends Engine { private final CounterMetric numDocDeletes = new CounterMetric(); private final CounterMetric numDocAppends = new CounterMetric(); private final CounterMetric numDocUpdates = new CounterMetric(); - private final NumericDocValuesField softDeleteField = Lucene.newSoftDeleteField(); + private final NumericDocValuesField softDeletesField = Lucene.newSoftDeletesField(); private final boolean softDeleteEnabled; private final SoftDeletesPolicy softDeletesPolicy; private final LastRefreshedCheckpointListener lastRefreshedCheckpointListener; @@ -393,7 +393,7 @@ private void bootstrapAppendOnlyInfoFromWriter(IndexWriter writer) { } @Override - public InternalEngine recoverFromTranslog() throws IOException { + public InternalEngine recoverFromTranslog(long recoverUpToSeqNo) throws IOException { flushLock.lock(); try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); @@ -401,7 +401,7 @@ public InternalEngine recoverFromTranslog() throws IOException { throw new IllegalStateException("Engine has already been recovered"); } try { - recoverFromTranslogInternal(); + recoverFromTranslogInternal(recoverUpToSeqNo); } catch (Exception e) { try { pendingTranslogRecovery.set(true); // just play safe and never allow commits on this see #ensureCanFlush @@ -423,11 +423,12 @@ public void skipTranslogRecovery() { pendingTranslogRecovery.set(false); // we are good - now we can commit } - private void recoverFromTranslogInternal() throws IOException { + private void recoverFromTranslogInternal(long recoverUpToSeqNo) throws IOException { Translog.TranslogGeneration translogGeneration = translog.getGeneration(); final int opsRecovered; - final long translogGen = Long.parseLong(lastCommittedSegmentInfos.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); - try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGen)) { + final long translogFileGen = Long.parseLong(lastCommittedSegmentInfos.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), translogFileGen), recoverUpToSeqNo)) { opsRecovered = config().getTranslogRecoveryRunner().run(this, snapshot); } catch (Exception e) { throw new EngineException(shardId, "failed to recover from translog", e); @@ -479,11 +480,6 @@ public void syncTranslog() throws IOException { revisitIndexDeletionPolicyOnTranslogSynced(); } - @Override - public Translog.Snapshot newSnapshotFromMinSeqNo(long minSeqNo) throws IOException { - return getTranslog().newSnapshotFromMinSeqNo(minSeqNo); - } - /** * Creates a new history snapshot for reading operations since the provided seqno. * The returned snapshot can be retrieved from either Lucene index or translog files. 
@@ -491,7 +487,7 @@ public Translog.Snapshot newSnapshotFromMinSeqNo(long minSeqNo) throws IOExcepti @Override public Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { - return newLuceneChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false); + return newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false); } else { return getTranslog().newSnapshotFromMinSeqNo(startingSeqNo); } @@ -503,12 +499,8 @@ public Translog.Snapshot readHistoryOperations(String source, MapperService mapp @Override public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { - try (Translog.Snapshot snapshot = - newLuceneChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false)) { + try (Translog.Snapshot snapshot = newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false)) { return snapshot.totalOperations(); - } catch (IOException ex) { - maybeFailEngine(source, ex); - throw ex; } } else { return getTranslog().estimateTotalOperationsFromMinSeq(startingSeqNo); @@ -528,6 +520,7 @@ public Translog.Location getTranslogLastWriteLocation() { private void revisitIndexDeletionPolicyOnTranslogSynced() throws IOException { if (combinedDeletionPolicy.hasUnreferencedCommits()) { indexWriter.deleteUnusedFiles(); + translog.trimUnreferencedReaders(); } } @@ -847,9 +840,9 @@ public IndexResult index(Index index) throws IOException { if (indexResult.getResultType() == Result.Type.SUCCESS) { location = translog.add(new Translog.Index(index, indexResult)); } else if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - // if we have document failure, record it as a no-op in the translog with the generated seq_no + // if we have document failure, record it as a no-op in the translog and Lucene with the generated seq_no final NoOp noOp = new NoOp(indexResult.getSeqNo(), index.primaryTerm(), index.origin(), - index.startTime(), indexResult.getFailure().getMessage()); + index.startTime(), indexResult.getFailure().toString()); location = innerNoOp(noOp).getTranslogLocation(); } else { location = null; @@ -1053,7 +1046,9 @@ private void addDocs(final List docs, final IndexWriter i private void addStaleDocs(final List docs, final IndexWriter indexWriter) throws IOException { assert softDeleteEnabled : "Add history documents but soft-deletes is disabled"; - docs.forEach(d -> d.add(softDeleteField)); + for (ParseContext.Document doc : docs) { + doc.add(softDeletesField); // soft-deleted every document before adding to Lucene + } if (docs.size() > 1) { indexWriter.addDocuments(docs); } else { @@ -1146,9 +1141,9 @@ private boolean assertDocDoesNotExist(final Index index, final boolean allowDele private void updateDocs(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException { if (softDeleteEnabled) { if (docs.size() > 1) { - indexWriter.softUpdateDocuments(uid, docs, softDeleteField); + indexWriter.softUpdateDocuments(uid, docs, softDeletesField); } else { - indexWriter.softUpdateDocument(uid, docs.get(0), softDeleteField); + indexWriter.softUpdateDocument(uid, docs.get(0), softDeletesField); } } else { if (docs.size() > 1) { @@ -1185,8 +1180,10 @@ public DeleteResult delete(Delete delete) throws IOException { 
if (deleteResult.getResultType() == Result.Type.SUCCESS) { location = translog.add(new Translog.Delete(delete, deleteResult)); } else if (deleteResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - location = translog.add(new Translog.NoOp(deleteResult.getSeqNo(), - delete.primaryTerm(), deleteResult.getFailure().getMessage())); + // if we have document failure, record it as a no-op in the translog and Lucene with the generated seq_no + final NoOp noOp = new NoOp(deleteResult.getSeqNo(), delete.primaryTerm(), delete.origin(), + delete.startTime(), deleteResult.getFailure().toString()); + location = innerNoOp(noOp).getTranslogLocation(); } else { location = null; } @@ -1291,11 +1288,11 @@ private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) final ParseContext.Document doc = tombstone.docs().get(0); assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null : "Delete tombstone document but _tombstone field is not set [" + doc + " ]"; - doc.add(softDeleteField); + doc.add(softDeletesField); if (plan.addStaleOpToLucene || plan.currentlyDeleted) { indexWriter.addDocument(doc); } else { - indexWriter.softUpdateDocument(delete.uid(), doc, softDeleteField); + indexWriter.softUpdateDocument(delete.uid(), doc, softDeletesField); } } else if (plan.currentlyDeleted == false) { // any exception that comes from this is a either an ACE or a fatal exception there @@ -1406,7 +1403,7 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { final ParseContext.Document doc = tombstone.docs().get(0); assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null : "Noop tombstone document but _tombstone field is not set [" + doc + " ]"; - doc.add(softDeleteField); + doc.add(softDeletesField); indexWriter.addDocument(doc); } catch (Exception ex) { if (maybeFailEngine("noop", ex)) { @@ -1440,6 +1437,7 @@ final void refresh(String source, SearcherScope scope) throws EngineException { // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) // both refresh types will result in an internal refresh but only the external will also // pass the new reader reference to the external reader manager. + final long localCheckpointBeforeRefresh = getLocalCheckpoint(); // this will also cause version map ram to be freed hence we always account for it. final long bytes = indexWriter.ramBytesUsed() + versionMap.ramBytesUsedForRefresh(); @@ -1465,6 +1463,7 @@ final void refresh(String source, SearcherScope scope) throws EngineException { } finally { store.decRef(); } + lastRefreshedCheckpointListener.updateRefreshedCheckpoint(localCheckpointBeforeRefresh); } } catch (AlreadyClosedException e) { failOnTragicEvent(e); @@ -1479,7 +1478,8 @@ final void refresh(String source, SearcherScope scope) throws EngineException { } finally { writingBytes.addAndGet(-bytes); } - + assert lastRefreshedCheckpoint() >= localCheckpointBeforeRefresh : "refresh checkpoint was not advanced; " + + "local_checkpoint=" + localCheckpointBeforeRefresh + " refresh_checkpoint=" + lastRefreshedCheckpoint(); // TODO: maybe we should just put a scheduled job in threadPool? // We check for pruning in each delete request, but we also prune here e.g. in case a delete burst comes in and then no more deletes // for a long time: @@ -1869,6 +1869,8 @@ private void releaseIndexCommit(IndexCommit snapshot) throws IOException { // Revisit the deletion policy if we can clean up the snapshotting commit. 
if (combinedDeletionPolicy.releaseCommit(snapshot)) { ensureOpen(); + // Here we don't have to trim translog because snapshotting an index commit + // does not lock translog or prevents unreferenced files from trimming. indexWriter.deleteUnusedFiles(); } } @@ -2083,9 +2085,9 @@ private IndexWriterConfig getIndexWriterConfig() { // background merges MergePolicy mergePolicy = config().getMergePolicy(); if (softDeleteEnabled) { - iwc.setSoftDeletesField(Lucene.SOFT_DELETE_FIELD); + iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD); mergePolicy = new RecoverySourcePruneMergePolicy(SourceFieldMapper.RECOVERY_SOURCE_NAME, softDeletesPolicy::getRetentionQuery, - new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETE_FIELD, softDeletesPolicy::getRetentionQuery, mergePolicy)); + new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, softDeletesPolicy::getRetentionQuery, mergePolicy)); } iwc.setMergePolicy(new ElasticsearchMergePolicy(mergePolicy)); iwc.setSimilarity(engineConfig.getSimilarity()); @@ -2438,19 +2440,26 @@ long getNumDocUpdates() { } @Override - public Translog.Snapshot newLuceneChangesSnapshot(String source, MapperService mapperService, - long minSeqNo, long maxSeqNo, boolean requiredFullRange) throws IOException { + public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService, + long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException { // TODO: Should we defer the refresh until we really need it? ensureOpen(); - if (lastRefreshedCheckpoint() < maxSeqNo) { + if (lastRefreshedCheckpoint() < toSeqNo) { refresh(source, SearcherScope.INTERNAL); } Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL); try { LuceneChangesSnapshot snapshot = new LuceneChangesSnapshot( - searcher, mapperService, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE, minSeqNo, maxSeqNo, requiredFullRange); + searcher, mapperService, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE, fromSeqNo, toSeqNo, requiredFullRange); searcher = null; return snapshot; + } catch (Exception e) { + try { + maybeFailEngine("acquire changes snapshot", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw e; } finally { IOUtils.close(searcher); } @@ -2486,9 +2495,11 @@ final long getMinRetainedSeqNo() { @Override public Closeable acquireRetentionLockForPeerRecovery() { - final Closeable translogLock = translog.acquireRetentionLock(); - final Releasable softDeletesLock = softDeletesPolicy.acquireRetentionLock(); - return () -> IOUtils.close(translogLock, softDeletesLock); + if (softDeleteEnabled) { + return softDeletesPolicy.acquireRetentionLock(); + } else { + return translog.acquireRetentionLock(); + } } @Override @@ -2544,21 +2555,31 @@ public long softUpdateDocuments(Term term, Iterable Math.max(curr, checkpoint)); + assert refreshedCheckpoint.get() >= checkpoint : refreshedCheckpoint.get() + " < " + checkpoint; + } } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java index 783188fbb6546..deebfba9ed424 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java @@ -29,7 +29,6 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; -import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.TopDocs; import 
org.apache.lucene.util.ArrayUtil; import org.elasticsearch.common.bytes.BytesReference; @@ -86,8 +85,8 @@ final class LuceneChangesSnapshot implements Translog.Snapshot { if (fromSeqNo < 0 || toSeqNo < 0 || fromSeqNo > toSeqNo) { throw new IllegalArgumentException("Invalid range; from_seqno [" + fromSeqNo + "], to_seqno [" + toSeqNo + "]"); } - if (searchBatchSize < 0) { - throw new IllegalArgumentException("Search_batch_size must not be negative [" + searchBatchSize + "]"); + if (searchBatchSize <= 0) { + throw new IllegalArgumentException("Search_batch_size must be positive [" + searchBatchSize + "]"); } final AtomicBoolean closed = new AtomicBoolean(); this.onClose = () -> { @@ -213,10 +212,10 @@ private void fillParallelArray(ScoreDoc[] scoreDocs, ParallelArray parallelArray } private TopDocs searchOperations(ScoreDoc after) throws IOException { - final Query rangeQuery = LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, fromSeqNo, toSeqNo); + final Query rangeQuery = LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, lastSeenSeqNo + 1, toSeqNo); final Sort sortedBySeqNoThenByTerm = new Sort( - new SortedNumericSortField(SeqNoFieldMapper.NAME, SortField.Type.LONG), - new SortedNumericSortField(SeqNoFieldMapper.PRIMARY_TERM_NAME, SortField.Type.LONG, true) + new SortField(SeqNoFieldMapper.NAME, SortField.Type.LONG), + new SortField(SeqNoFieldMapper.PRIMARY_TERM_NAME, SortField.Type.LONG, true) ); return indexSearcher.searchAfter(after, rangeQuery, searchBatchSize, sortedBySeqNoThenByTerm); } @@ -281,9 +280,9 @@ private Translog.Operation readDocAsOp(int docIndex) throws IOException { } private boolean assertDocSoftDeleted(LeafReader leafReader, int segmentDocId) throws IOException { - final NumericDocValues ndv = leafReader.getNumericDocValues(Lucene.SOFT_DELETE_FIELD); + final NumericDocValues ndv = leafReader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD); if (ndv == null || ndv.advanceExact(segmentDocId) == false) { - throw new IllegalStateException("DocValues for field [" + Lucene.SOFT_DELETE_FIELD + "] is not found"); + throw new IllegalStateException("DocValues for field [" + Lucene.SOFT_DELETES_FIELD + "] is not found"); } return ndv.longValue() == 1; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java index 72f41f110dd27..fde97562de8f8 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java @@ -72,7 +72,7 @@ static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Su builder.add(retainSourceQuerySupplier.get(), BooleanClause.Occur.FILTER); IndexSearcher s = new IndexSearcher(reader); s.setQueryCache(null); - Weight weight = s.createWeight(builder.build(), false, 1.0f); + Weight weight = s.createWeight(s.rewrite(builder.build()), false, 1.0f); Scorer scorer = weight.scorer(reader.getContext()); if (scorer != null) { return new SourcePruningFilterCodecReader(recoverySourceField, reader, BitSet.of(scorer.iterator(), reader.maxDoc())); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 9c334f795511d..20b4bb37cc7aa 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; @@ -166,7 +167,7 @@ public Mapper.Builder parse(String name, Map node, ParserCo builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1)); iterator.remove(); } else if (propName.equals("norms")) { - builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode, "norms") == false); + TypeParsers.parseNorms(builder, name, propNode); iterator.remove(); } else if (propName.equals("eager_global_ordinals")) { builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(propNode, "eager_global_ordinals")); @@ -256,8 +257,10 @@ public void setSplitQueriesOnWhitespace(boolean splitQueriesOnWhitespace) { public Query existsQuery(QueryShardContext context) { if (hasDocValues()) { return new DocValuesFieldExistsQuery(name()); - } else { + } else if (omitNorms()) { return new TermQuery(new Term(FieldNamesFieldMapper.NAME, name())); + } else { + return new NormsFieldExistsQuery(name()); } } @@ -366,17 +369,19 @@ protected void parseCreateField(ParseContext context, List field // convert to utf8 only once before feeding postings/dv/stored fields final BytesRef binaryValue = new BytesRef(value); - if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { + if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { Field field = new Field(fieldType().name(), binaryValue, fieldType()); fields.add(field); + + if (fieldType().hasDocValues() == false && fieldType().omitNorms()) { + createFieldNamesField(context, fields); + } } + if (fieldType().hasDocValues()) { fields.add(new SortedSetDocValuesField(fieldType().name(), binaryValue)); - } else if (fieldType().stored() || fieldType().indexOptions() != IndexOptions.NONE) { - createFieldNamesField(context, fields); } } - @Override protected String contentType() { return CONTENT_TYPE; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 5f3f4a4de49d6..4a3fa852e7f7d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -35,6 +35,7 @@ import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.joda.DateMathParser; @@ -314,7 +315,13 @@ public boolean isAggregatable() { /** Generates a query that will only match documents that contain the given value. * The default implementation returns a {@link TermQuery} over the value bytes, * boosted by {@link #boost()}. - * @throws IllegalArgumentException if {@code value} cannot be converted to the expected data type */ + * @throws IllegalArgumentException if {@code value} cannot be converted to the expected data type or if the field is not searchable + * due to the way it is configured (eg. 
not indexed) + * @throws ElasticsearchParseException if {@code value} cannot be converted to the expected data type + * @throws UnsupportedOperationException if the field is not searchable regardless of options + * @throws QueryShardException if the field is not searchable regardless of options + */ + // TODO: Standardize exception types public abstract Query termQuery(Object value, @Nullable QueryShardContext context); /** Build a constant-scoring query that matches all values. The default implementation uses a diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 921e472c94ff1..d06374d5d8915 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -25,6 +25,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; import org.apache.lucene.index.Term; +import org.elasticsearch.Assertions; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -52,6 +53,7 @@ import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.search.suggest.completion.context.ContextMapping; import java.io.Closeable; import java.io.IOException; @@ -191,8 +193,8 @@ public static Map parseMapping(NamedXContentRegistry xContentReg /** * Update mapping by only merging the metadata that is different between received and stored entries */ - public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { - assert indexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + " but was " + indexMetaData.getIndex(); + public boolean updateMapping(final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData) throws IOException { + assert newIndexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + " but was " + newIndexMetaData.getIndex(); // go over and add the relevant mappings (or update them) Set existingMappers = new HashSet<>(); if (mapper != null) { @@ -204,7 +206,7 @@ public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { final Map updatedEntries; try { // only update entries if needed - updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true); + updatedEntries = internalMerge(newIndexMetaData, MergeReason.MAPPING_RECOVERY, true); } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e); throw e; @@ -212,9 +214,11 @@ public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { boolean requireRefresh = false; + assertMappingVersion(currentIndexMetaData, newIndexMetaData, updatedEntries); + for (DocumentMapper documentMapper : updatedEntries.values()) { String mappingType = documentMapper.type(); - CompressedXContent incomingMappingSource = indexMetaData.mapping(mappingType).source(); + CompressedXContent incomingMappingSource = newIndexMetaData.mapping(mappingType).source(); String op = existingMappers.contains(mappingType) ? 
"updated" : "added"; if (logger.isDebugEnabled() && incomingMappingSource.compressed().length < 512) { @@ -239,6 +243,45 @@ public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { return requireRefresh; } + private void assertMappingVersion( + final IndexMetaData currentIndexMetaData, + final IndexMetaData newIndexMetaData, + final Map updatedEntries) { + if (Assertions.ENABLED + && currentIndexMetaData != null + && currentIndexMetaData.getCreationVersion().onOrAfter(Version.V_6_5_0)) { + if (currentIndexMetaData.getMappingVersion() == newIndexMetaData.getMappingVersion()) { + // if the mapping version is unchanged, then there should not be any updates and all mappings should be the same + assert updatedEntries.isEmpty() : updatedEntries; + for (final ObjectCursor mapping : newIndexMetaData.getMappings().values()) { + final CompressedXContent currentSource = currentIndexMetaData.mapping(mapping.value.type()).source(); + final CompressedXContent newSource = mapping.value.source(); + assert currentSource.equals(newSource) : + "expected current mapping [" + currentSource + "] for type [" + mapping.value.type() + "] " + + "to be the same as new mapping [" + newSource + "]"; + } + } else { + // if the mapping version is changed, it should increase, there should be updates, and the mapping should be different + final long currentMappingVersion = currentIndexMetaData.getMappingVersion(); + final long newMappingVersion = newIndexMetaData.getMappingVersion(); + assert currentMappingVersion < newMappingVersion : + "expected current mapping version [" + currentMappingVersion + "] " + + "to be less than new mapping version [" + newMappingVersion + "]"; + assert updatedEntries.isEmpty() == false; + for (final DocumentMapper documentMapper : updatedEntries.values()) { + final MappingMetaData currentMapping = currentIndexMetaData.mapping(documentMapper.type()); + if (currentMapping != null) { + final CompressedXContent currentSource = currentMapping.source(); + final CompressedXContent newSource = documentMapper.mappingSource(); + assert currentSource.equals(newSource) == false : + "expected current mapping [" + currentSource + "] for type [" + documentMapper.type() + "] " + + "to be different than new mapping"; + } + } + } + } + } + public void merge(Map> mappings, MergeReason reason) { Map mappingSourcesCompressed = new LinkedHashMap<>(mappings.size()); for (Map.Entry> entry : mappings.entrySet()) { @@ -421,6 +464,8 @@ private synchronized Map internalMerge(@Nullable Documen MapperMergeValidator.validateFieldReferences(fieldMappers, fieldAliasMappers, fullPathObjectMappers, fieldTypes); + ContextMapping.validateContextPaths(indexSettings.getIndexVersionCreated(), fieldMappers, fieldTypes::get); + if (reason == MergeReason.MAPPING_UPDATE) { // this check will only be performed on the master node when there is // a call to the update mapping API. 
For all other cases like @@ -465,11 +510,11 @@ private synchronized Map internalMerge(@Nullable Documen // commit the change if (defaultMappingSource != null) { this.defaultMappingSource = defaultMappingSource; + this.defaultMapper = defaultMapper; } if (newMapper != null) { this.mapper = newMapper; } - this.defaultMapper = defaultMapper; this.fieldTypes = fieldTypes; this.hasNested = hasNested; this.fullPathObjectMappers = fullPathObjectMappers; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index a6a5fab0d04f9..a43aed3b08de7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -122,8 +122,7 @@ private static void parseAnalyzersAndTermVectors(FieldMapper.Builder builder, St } } - public static void parseNorms(FieldMapper.Builder builder, String fieldName, Object propNode, - Mapper.TypeParser.ParserContext parserContext) { + public static void parseNorms(FieldMapper.Builder builder, String fieldName, Object propNode) { builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode, fieldName + ".norms") == false); } @@ -140,7 +139,7 @@ public static void parseTextField(FieldMapper.Builder builder, String name, Map< final String propName = entry.getKey(); final Object propNode = entry.getValue(); if ("norms".equals(propName)) { - parseNorms(builder, name, propNode, parserContext); + parseNorms(builder, name, propNode); iterator.remove(); } } @@ -265,7 +264,10 @@ private static IndexOptions nodeIndexOptionValue(final Object propNode) { } public static FormatDateTimeFormatter parseDateTimeFormatter(Object node) { - return Joda.forPattern(node.toString()); + if (node instanceof String) { + return Joda.forPattern((String) node); + } + throw new IllegalArgumentException("Invalid format: [" + node.toString() + "]: expected string value"); } public static void parseTermVector(String fieldName, String termVector, FieldMapper.Builder builder) throws MapperParsingException { diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java index 8b2db374c8da9..894a886182d35 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java @@ -150,13 +150,7 @@ public InnerHitBuilder(String name) { */ public InnerHitBuilder(StreamInput in) throws IOException { name = in.readOptionalString(); - if (in.getVersion().before(Version.V_5_5_0)) { - in.readOptionalString(); - in.readOptionalString(); - } - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - ignoreUnmapped = in.readBoolean(); - } + ignoreUnmapped = in.readBoolean(); from = in.readVInt(); size = in.readVInt(); explain = in.readBoolean(); @@ -191,14 +185,6 @@ public InnerHitBuilder(StreamInput in) throws IOException { } } highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new); - if (in.getVersion().before(Version.V_5_5_0)) { - /** - * this is needed for BWC with nodes pre 5.5 - */ - in.readNamedWriteable(QueryBuilder.class); - boolean hasChildren = in.readBoolean(); - assert hasChildren == false; - } if (in.getVersion().onOrAfter(Version.V_6_4_0)) { this.innerCollapseBuilder = in.readOptionalWriteable(CollapseBuilder::new); } @@ -206,9 +192,6 @@ public InnerHitBuilder(StreamInput in) throws IOException { @Override public void 
writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(Version.V_5_5_0)) { - throw new IOException("Invalid output version, must >= " + Version.V_5_5_0.toString()); - } out.writeOptionalString(name); out.writeBoolean(ignoreUnmapped); out.writeVInt(from); @@ -252,84 +235,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - /** - * BWC serialization for nested {@link InnerHitBuilder}. - * Should only be used to send nested inner hits to nodes pre 5.5. - */ - protected void writeToNestedBWC(StreamOutput out, QueryBuilder query, String nestedPath) throws IOException { - assert out.getVersion().before(Version.V_5_5_0) : - "invalid output version, must be < " + Version.V_5_5_0.toString(); - writeToBWC(out, query, nestedPath, null); - } - - /** - * BWC serialization for collapsing {@link InnerHitBuilder}. - * Should only be used to send collapsing inner hits to nodes pre 5.5. - */ - public void writeToCollapseBWC(StreamOutput out) throws IOException { - assert out.getVersion().before(Version.V_5_5_0) : - "invalid output version, must be < " + Version.V_5_5_0.toString(); - writeToBWC(out, new MatchAllQueryBuilder(), null, null); - } - - /** - * BWC serialization for parent/child {@link InnerHitBuilder}. - * Should only be used to send hasParent or hasChild inner hits to nodes pre 5.5. - */ - public void writeToParentChildBWC(StreamOutput out, QueryBuilder query, String parentChildPath) throws IOException { - assert(out.getVersion().before(Version.V_5_5_0)) : - "invalid output version, must be < " + Version.V_5_5_0.toString(); - writeToBWC(out, query, null, parentChildPath); - } - - private void writeToBWC(StreamOutput out, - QueryBuilder query, - String nestedPath, - String parentChildPath) throws IOException { - out.writeOptionalString(name); - if (nestedPath != null) { - out.writeOptionalString(nestedPath); - out.writeOptionalString(null); - } else { - out.writeOptionalString(null); - out.writeOptionalString(parentChildPath); - } - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeBoolean(ignoreUnmapped); - } - out.writeVInt(from); - out.writeVInt(size); - out.writeBoolean(explain); - out.writeBoolean(version); - out.writeBoolean(trackScores); - out.writeOptionalWriteable(storedFieldsContext); - out.writeGenericValue(docValueFields == null - ? 
null - : docValueFields.stream().map(ff -> ff.field).collect(Collectors.toList())); - boolean hasScriptFields = scriptFields != null; - out.writeBoolean(hasScriptFields); - if (hasScriptFields) { - out.writeVInt(scriptFields.size()); - Iterator iterator = scriptFields.stream() - .sorted(Comparator.comparing(ScriptField::fieldName)).iterator(); - while (iterator.hasNext()) { - iterator.next().writeTo(out); - } - } - out.writeOptionalWriteable(fetchSourceContext); - boolean hasSorts = sorts != null; - out.writeBoolean(hasSorts); - if (hasSorts) { - out.writeVInt(sorts.size()); - for (SortBuilder sort : sorts) { - out.writeNamedWriteable(sort); - } - } - out.writeOptionalWriteable(highlightBuilder); - out.writeNamedWriteable(query); - out.writeBoolean(false); - } - public String getName() { return name; } diff --git a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java index 0de474f8b9901..950c9e052adae 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.termvectors.MultiTermVectorsItemResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; @@ -47,7 +46,6 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; @@ -220,11 +218,7 @@ public Item(@Nullable String index, @Nullable String type, XContentBuilder doc) type = in.readOptionalString(); if (in.readBoolean()) { doc = (BytesReference) in.readGenericValue(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - xContentType = in.readEnum(XContentType.class); - } else { - xContentType = XContentHelper.xContentType(doc); - } + xContentType = in.readEnum(XContentType.class); } else { id = in.readString(); } @@ -242,9 +236,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(doc != null); if (doc != null) { out.writeGenericValue(doc); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); } else { out.writeString(id); } diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 889f41a037f86..8d7c0190eb210 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -32,7 +32,6 @@ import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ParentChildrenBlockJoinQuery; import org.apache.lucene.search.join.ScoreMode; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -103,15 +102,7 @@ protected void 
doWriteTo(StreamOutput out) throws IOException { out.writeString(path); out.writeVInt(scoreMode.ordinal()); out.writeNamedWriteable(query); - if (out.getVersion().before(Version.V_5_5_0)) { - final boolean hasInnerHit = innerHitBuilder != null; - out.writeBoolean(hasInnerHit); - if (hasInnerHit) { - innerHitBuilder.writeToNestedBWC(out, query, path); - } - } else { - out.writeOptionalWriteable(innerHitBuilder); - } + out.writeOptionalWriteable(innerHitBuilder); out.writeBoolean(ignoreUnmapped); } @@ -398,7 +389,7 @@ public TopDocs[] topDocs(SearchHit[] hits) throws IOException { if (size() == 0) { TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx); - result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0); + result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, Float.NaN); } else { int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); TopDocsCollector topDocsCollector; diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index e9d53d8e82948..0289ce6f6ae44 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -175,9 +175,6 @@ public QueryStringQueryBuilder(StreamInput in) throws IOException { analyzer = in.readOptionalString(); quoteAnalyzer = in.readOptionalString(); quoteFieldSuffix = in.readOptionalString(); - if (in.getVersion().before(Version.V_6_0_0_beta1)) { - in.readBoolean(); // auto_generate_phrase_query - } allowLeadingWildcard = in.readOptionalBoolean(); analyzeWildcard = in.readOptionalBoolean(); enablePositionIncrements = in.readBoolean(); @@ -186,27 +183,15 @@ public QueryStringQueryBuilder(StreamInput in) throws IOException { fuzzyMaxExpansions = in.readVInt(); fuzzyRewrite = in.readOptionalString(); phraseSlop = in.readVInt(); - if (in.getVersion().before(Version.V_6_0_0_beta1)) { - in.readBoolean(); // use_dismax - tieBreaker = in.readFloat(); - type = DEFAULT_TYPE; - } else { - type = MultiMatchQueryBuilder.Type.readFromStream(in); - tieBreaker = in.readOptionalFloat(); - } + type = MultiMatchQueryBuilder.Type.readFromStream(in); + tieBreaker = in.readOptionalFloat(); + rewrite = in.readOptionalString(); minimumShouldMatch = in.readOptionalString(); lenient = in.readOptionalBoolean(); timeZone = in.readOptionalTimeZone(); escape = in.readBoolean(); maxDeterminizedStates = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_5_1_1) && in.getVersion().before(Version.V_6_0_0_beta1)) { - in.readBoolean(); // split_on_whitespace - Boolean useAllField = in.readOptionalBoolean(); - if (useAllField != null && useAllField) { - defaultField = "*"; - } - } if (in.getVersion().onOrAfter(Version.V_6_1_0)) { autoGenerateSynonymsPhraseQuery = in.readBoolean(); fuzzyTranspositions = in.readBoolean(); @@ -226,9 +211,6 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalString(this.analyzer); out.writeOptionalString(this.quoteAnalyzer); out.writeOptionalString(this.quoteFieldSuffix); - if (out.getVersion().before(Version.V_6_0_0_beta1)) { - out.writeBoolean(false); // auto_generate_phrase_query - } out.writeOptionalBoolean(this.allowLeadingWildcard); out.writeOptionalBoolean(this.analyzeWildcard); 
out.writeBoolean(this.enablePositionIncrements); @@ -237,24 +219,14 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(this.fuzzyMaxExpansions); out.writeOptionalString(this.fuzzyRewrite); out.writeVInt(this.phraseSlop); - if (out.getVersion().before(Version.V_6_0_0_beta1)) { - out.writeBoolean(true); // use_dismax - out.writeFloat(tieBreaker != null ? tieBreaker : 0.0f); - } else { - type.writeTo(out); - out.writeOptionalFloat(tieBreaker); - } + type.writeTo(out); + out.writeOptionalFloat(tieBreaker); out.writeOptionalString(this.rewrite); out.writeOptionalString(this.minimumShouldMatch); out.writeOptionalBoolean(this.lenient); out.writeOptionalTimeZone(timeZone); out.writeBoolean(this.escape); out.writeVInt(this.maxDeterminizedStates); - if (out.getVersion().onOrAfter(Version.V_5_1_1) && out.getVersion().before(Version.V_6_0_0_beta1)) { - out.writeBoolean(false); // split_on_whitespace - Boolean useAllFields = defaultField == null ? null : Regex.isMatchAllPattern(defaultField); - out.writeOptionalBoolean(useAllFields); - } if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeBoolean(autoGenerateSynonymsPhraseQuery); out.writeBoolean(fuzzyTranspositions); @@ -328,8 +300,9 @@ public Map fields() { /** * @param type Sets how multiple fields should be combined to build textual part queries. */ - public void type(MultiMatchQueryBuilder.Type type) { + public QueryStringQueryBuilder type(MultiMatchQueryBuilder.Type type) { this.type = type; + return this; } /** @@ -388,7 +361,7 @@ public QueryStringQueryBuilder analyzer(String analyzer) { this.analyzer = analyzer; return this; } - + /** * The optional analyzer used to analyze the query string. Note, if a field has search analyzer * defined for it, then it will be used automatically. Defaults to the smart search analyzer. @@ -899,9 +872,9 @@ protected boolean doEquals(QueryStringQueryBuilder other) { Objects.equals(tieBreaker, other.tieBreaker) && Objects.equals(rewrite, other.rewrite) && Objects.equals(minimumShouldMatch, other.minimumShouldMatch) && - Objects.equals(lenient, other.lenient) && + Objects.equals(lenient, other.lenient) && Objects.equals( - timeZone == null ? null : timeZone.getID(), + timeZone == null ? null : timeZone.getID(), other.timeZone == null ? 
null : other.timeZone.getID()) && Objects.equals(escape, other.escape) && Objects.equals(maxDeterminizedStates, other.maxDeterminizedStates) && diff --git a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index 6223254874d07..b297036f2f37b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -23,7 +23,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -108,14 +107,12 @@ public RangeQueryBuilder(StreamInput in) throws IOException { if (formatString != null) { format = Joda.forPattern(formatString); } - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - String relationString = in.readOptionalString(); - if (relationString != null) { - relation = ShapeRelation.getRelationByName(relationString); - if (relation != null && !isRelationAllowed(relation)) { - throw new IllegalArgumentException( - "[range] query does not support relation [" + relationString + "]"); - } + String relationString = in.readOptionalString(); + if (relationString != null) { + relation = ShapeRelation.getRelationByName(relationString); + if (relation != null && !isRelationAllowed(relation)) { + throw new IllegalArgumentException( + "[range] query does not support relation [" + relationString + "]"); } } } @@ -139,13 +136,11 @@ protected void doWriteTo(StreamOutput out) throws IOException { formatString = this.format.format(); } out.writeOptionalString(formatString); - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - String relationString = null; - if (this.relation != null) { - relationString = this.relation.getRelationName(); - } - out.writeOptionalString(relationString); + String relationString = null; + if (this.relation != null) { + relationString = this.relation.getRelationName(); } + out.writeOptionalString(relationString); } /** diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index 46a958b58fe28..473aa636caab0 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -168,27 +168,11 @@ public SimpleQueryStringBuilder(StreamInput in) throws IOException { flags = in.readInt(); analyzer = in.readOptionalString(); defaultOperator = Operator.readFromStream(in); - if (in.getVersion().before(Version.V_5_1_1)) { - in.readBoolean(); // lowercase_expanded_terms - } settings.lenient(in.readBoolean()); - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - this.lenientSet = in.readBoolean(); - } + this.lenientSet = in.readBoolean(); settings.analyzeWildcard(in.readBoolean()); - if (in.getVersion().before(Version.V_5_1_1)) { - in.readString(); // locale - } minimumShouldMatch = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - settings.quoteFieldSuffix(in.readOptionalString()); - if (in.getVersion().before(Version.V_6_0_0_beta2)) { - Boolean useAllFields = in.readOptionalBoolean(); - if (useAllFields != null && useAllFields) { - useAllFields(true); - } - } - } + 
settings.quoteFieldSuffix(in.readOptionalString()); if (in.getVersion().onOrAfter(Version.V_6_1_0)) { settings.autoGenerateSynonymsPhraseQuery(in.readBoolean()); settings.fuzzyPrefixLength(in.readVInt()); @@ -208,28 +192,11 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeInt(flags); out.writeOptionalString(analyzer); defaultOperator.writeTo(out); - if (out.getVersion().before(Version.V_5_1_1)) { - out.writeBoolean(true); // lowercase_expanded_terms - } out.writeBoolean(settings.lenient()); - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeBoolean(lenientSet); - } + out.writeBoolean(lenientSet); out.writeBoolean(settings.analyzeWildcard()); - if (out.getVersion().before(Version.V_5_1_1)) { - out.writeString(Locale.ROOT.toLanguageTag()); // locale - } out.writeOptionalString(minimumShouldMatch); - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeOptionalString(settings.quoteFieldSuffix()); - if (out.getVersion().before(Version.V_6_0_0_beta2)) { - if (useAllFields()) { - out.writeOptionalBoolean(true); - } else { - out.writeOptionalBoolean(null); - } - } - } + out.writeOptionalString(settings.quoteFieldSuffix()); if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeBoolean(settings.autoGenerateSynonymsPhraseQuery()); out.writeVInt(settings.fuzzyPrefixLength()); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java index 8536337bfdbc2..b2e6f98f1268f 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java @@ -44,8 +44,8 @@ public abstract class AbstractBulkByScrollRequest> extends ActionRequest { public static final int SIZE_ALL_MATCHES = -1; - static final TimeValue DEFAULT_SCROLL_TIMEOUT = timeValueMinutes(5); - static final int DEFAULT_SCROLL_SIZE = 1000; + public static final TimeValue DEFAULT_SCROLL_TIMEOUT = timeValueMinutes(5); + public static final int DEFAULT_SCROLL_SIZE = 1000; public static final int AUTO_SLICES = 0; public static final String AUTO_SLICES_VALUE = "auto"; @@ -252,6 +252,14 @@ public Self setTimeout(TimeValue timeout) { return self(); } + /** + * Timeout to wait for the shards on to be available for each bulk request? + */ + public Self setTimeout(String timeout) { + this.timeout = TimeValue.parseTimeValue(timeout, this.timeout, getClass().getSimpleName() + ".timeout"); + return self(); + } + /** * The number of shard copies that must be active before proceeding with the write. 
*/ diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java index ac206c2c44f06..7fe60db2ddda7 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java @@ -19,14 +19,23 @@ package org.elasticsearch.index.reindex; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.index.reindex.BulkByScrollTask.Status; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; +import org.elasticsearch.rest.RestStatus; import java.io.IOException; import java.util.ArrayList; @@ -36,6 +45,7 @@ import static java.lang.Math.min; import static java.util.Objects.requireNonNull; import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; /** * Response used for actions that index many documents using a scroll request. 
@@ -47,6 +57,27 @@ public class BulkByScrollResponse extends ActionResponse implements ToXContentFr private List searchFailures; private boolean timedOut; + private static final String TOOK_FIELD = "took"; + private static final String TIMED_OUT_FIELD = "timed_out"; + private static final String FAILURES_FIELD = "failures"; + + @SuppressWarnings("unchecked") + private static final ObjectParser PARSER = + new ObjectParser<>( + "bulk_by_scroll_response", + true, + BulkByScrollResponseBuilder::new + ); + static { + PARSER.declareLong(BulkByScrollResponseBuilder::setTook, new ParseField(TOOK_FIELD)); + PARSER.declareBoolean(BulkByScrollResponseBuilder::setTimedOut, new ParseField(TIMED_OUT_FIELD)); + PARSER.declareObjectArray( + BulkByScrollResponseBuilder::setFailures, (p, c) -> parseFailure(p), new ParseField(FAILURES_FIELD) + ); + // since the result of BulkByScrollResponse.Status are mixed we also parse that in this + Status.declareFields(PARSER); + } + public BulkByScrollResponse() { } @@ -87,6 +118,10 @@ public long getCreated() { return status.getCreated(); } + public long getTotal() { + return status.getTotal(); + } + public long getDeleted() { return status.getDeleted(); } @@ -171,8 +206,8 @@ public void readFrom(StreamInput in) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("took", took.millis()); - builder.field("timed_out", timedOut); + builder.field(TOOK_FIELD, took.millis()); + builder.field(TIMED_OUT_FIELD, timedOut); status.innerXContent(builder, params); builder.startArray("failures"); for (Failure failure: bulkFailures) { @@ -187,6 +222,80 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + public static BulkByScrollResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null).buildResponse(); + } + + private static Object parseFailure(XContentParser parser) throws IOException { + ensureExpectedToken(Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); + Token token; + String index = null; + String type = null; + String id = null; + Integer status = null; + Integer shardId = null; + String nodeId = null; + ElasticsearchException bulkExc = null; + ElasticsearchException searchExc = null; + while ((token = parser.nextToken()) != Token.END_OBJECT) { + ensureExpectedToken(Token.FIELD_NAME, token, parser::getTokenLocation); + String name = parser.currentName(); + token = parser.nextToken(); + if (token == Token.START_ARRAY) { + parser.skipChildren(); + } else if (token == Token.START_OBJECT) { + switch (name) { + case SearchFailure.REASON_FIELD: + bulkExc = ElasticsearchException.fromXContent(parser); + break; + case Failure.CAUSE_FIELD: + searchExc = ElasticsearchException.fromXContent(parser); + break; + default: + parser.skipChildren(); + } + } else if (token == Token.VALUE_STRING) { + switch (name) { + // This field is the same as SearchFailure.index + case Failure.INDEX_FIELD: + index = parser.text(); + break; + case Failure.TYPE_FIELD: + type = parser.text(); + break; + case Failure.ID_FIELD: + id = parser.text(); + break; + case SearchFailure.NODE_FIELD: + nodeId = parser.text(); + break; + default: + // Do nothing + break; + } + } else if (token == Token.VALUE_NUMBER) { + switch (name) { + case Failure.STATUS_FIELD: + status = parser.intValue(); + break; + case SearchFailure.SHARD_FIELD: + shardId = parser.intValue(); + break; + default: + // Do nothing + break; + } + } + } + if (bulkExc != 
null) { + return new Failure(index, type, id, bulkExc, RestStatus.fromCode(status)); + } else if (searchExc != null) { + return new SearchFailure(searchExc, index, shardId, nodeId); + } else { + throw new ElasticsearchParseException("failed to parse failures array. At least one of {reason,cause} must be present"); + } + } + @Override public String toString() { StringBuilder builder = new StringBuilder(); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponseBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponseBuilder.java new file mode 100644 index 0000000000000..ad5bfd6e03cdf --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponseBuilder.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.action.bulk.BulkItemResponse.Failure; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; +import org.elasticsearch.index.reindex.BulkByScrollTask.StatusBuilder; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Helps build a {@link BulkByScrollResponse}. Used by an instance of {@link ObjectParser} when parsing from XContent. 
+ */ +class BulkByScrollResponseBuilder extends StatusBuilder { + private TimeValue took; + private BulkByScrollTask.Status status; + private List bulkFailures = new ArrayList<>(); + private List searchFailures = new ArrayList<>(); + private boolean timedOut; + + BulkByScrollResponseBuilder() {} + + public void setTook(long took) { + setTook(new TimeValue(took, TimeUnit.MILLISECONDS)); + } + + public void setTook(TimeValue took) { + this.took = took; + } + + public void setStatus(BulkByScrollTask.Status status) { + this.status = status; + } + + public void setFailures(List failures) { + if (failures != null) { + for (Object object: failures) { + if (object instanceof Failure) { + bulkFailures.add((Failure) object); + } else if (object instanceof SearchFailure) { + searchFailures.add((SearchFailure) object); + } + } + } + } + + public void setTimedOut(boolean timedOut) { + this.timedOut = timedOut; + } + + public BulkByScrollResponse buildResponse() { + status = super.buildStatus(); + return new BulkByScrollResponse(took, status, bulkFailures, searchFailures, timedOut); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java index 9ff26b13212c7..7aa2c8a1b75a9 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java @@ -20,29 +20,41 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; import static java.lang.Math.min; import static java.util.Collections.emptyList; import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; /** * Task storing information about a currently running BulkByScroll request. @@ -189,6 +201,124 @@ public boolean shouldCancelChildrenOnCancellation() { return true; } + /** + * This class acts as a builder for {@link Status}. Once the {@link Status} object is built by calling + * {@link #buildStatus()} it is immutable. Used by an instance of {@link ObjectParser} when parsing from + * XContent. 
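 * <p>
 * A minimal sketch of how a parser is expected to drive this builder (the {@code statusParser}
 * name and the {@code xContentParser} variable are illustrative only):
 * <pre>{@code
 * ObjectParser<StatusBuilder, Void> statusParser =
 *         new ObjectParser<>("bulk_by_scroll_task_status", true, StatusBuilder::new);
 * Status.declareFields(statusParser);   // wires the JSON fields onto the setters below
 * Status status = statusParser.apply(xContentParser, null).buildStatus();
 * }</pre>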
+ */ + public static class StatusBuilder { + private Integer sliceId = null; + private Long total = null; + private long updated = 0; // Not present during deleteByQuery + private long created = 0; // Not present during updateByQuery + private Long deleted = null; + private Integer batches = null; + private Long versionConflicts = null; + private Long noops = null; + private Long bulkRetries = null; + private Long searchRetries = null; + private TimeValue throttled = null; + private Float requestsPerSecond = null; + private String reasonCancelled = null; + private TimeValue throttledUntil = null; + private List sliceStatuses = new ArrayList<>(); + + public void setSliceId(Integer sliceId) { + this.sliceId = sliceId; + } + + public void setTotal(Long total) { + this.total = total; + } + + public void setUpdated(Long updated) { + this.updated = updated; + } + + public void setCreated(Long created) { + this.created = created; + } + + public void setDeleted(Long deleted) { + this.deleted = deleted; + } + + public void setBatches(Integer batches) { + this.batches = batches; + } + + public void setVersionConflicts(Long versionConflicts) { + this.versionConflicts = versionConflicts; + } + + public void setNoops(Long noops) { + this.noops = noops; + } + + public void setRetries(Tuple retries) { + if (retries != null) { + setBulkRetries(retries.v1()); + setSearchRetries(retries.v2()); + } + } + + public void setBulkRetries(Long bulkRetries) { + this.bulkRetries = bulkRetries; + } + + public void setSearchRetries(Long searchRetries) { + this.searchRetries = searchRetries; + } + + public void setThrottled(Long throttled) { + if (throttled != null) { + this.throttled = new TimeValue(throttled, TimeUnit.MILLISECONDS); + } + } + + public void setRequestsPerSecond(Float requestsPerSecond) { + if (requestsPerSecond != null) { + requestsPerSecond = requestsPerSecond == -1 ? 
Float.POSITIVE_INFINITY : requestsPerSecond; + this.requestsPerSecond = requestsPerSecond; + } + } + + public void setReasonCancelled(String reasonCancelled) { + this.reasonCancelled = reasonCancelled; + } + + public void setThrottledUntil(Long throttledUntil) { + if (throttledUntil != null) { + this.throttledUntil = new TimeValue(throttledUntil, TimeUnit.MILLISECONDS); + } + } + + public void setSliceStatuses(List sliceStatuses) { + if (sliceStatuses != null) { + this.sliceStatuses.addAll(sliceStatuses); + } + } + + public void addToSliceStatuses(StatusOrException statusOrException) { + this.sliceStatuses.add(statusOrException); + } + + public Status buildStatus() { + if (sliceStatuses.isEmpty()) { + try { + return new Status( + sliceId, total, updated, created, deleted, batches, versionConflicts, noops, bulkRetries, + searchRetries, throttled, requestsPerSecond, reasonCancelled, throttledUntil + ); + } catch (NullPointerException npe) { + throw new IllegalArgumentException("a required field is null when building Status"); + } + } else { + return new Status(sliceStatuses, reasonCancelled); + } + } + } + public static class Status implements Task.Status, SuccessfullyProcessed { public static final String NAME = "bulk-by-scroll"; @@ -204,6 +334,76 @@ public static class Status implements Task.Status, SuccessfullyProcessed { */ public static final String INCLUDE_UPDATED = "include_updated"; + public static final String SLICE_ID_FIELD = "slice_id"; + public static final String TOTAL_FIELD = "total"; + public static final String UPDATED_FIELD = "updated"; + public static final String CREATED_FIELD = "created"; + public static final String DELETED_FIELD = "deleted"; + public static final String BATCHES_FIELD = "batches"; + public static final String VERSION_CONFLICTS_FIELD = "version_conflicts"; + public static final String NOOPS_FIELD = "noops"; + public static final String RETRIES_FIELD = "retries"; + public static final String RETRIES_BULK_FIELD = "bulk"; + public static final String RETRIES_SEARCH_FIELD = "search"; + public static final String THROTTLED_RAW_FIELD = "throttled_millis"; + public static final String THROTTLED_HR_FIELD = "throttled"; + public static final String REQUESTS_PER_SEC_FIELD = "requests_per_second"; + public static final String CANCELED_FIELD = "canceled"; + public static final String THROTTLED_UNTIL_RAW_FIELD = "throttled_until_millis"; + public static final String THROTTLED_UNTIL_HR_FIELD = "throttled_until"; + public static final String SLICES_FIELD = "slices"; + + public static Set FIELDS_SET = new HashSet<>(); + static { + FIELDS_SET.add(SLICE_ID_FIELD); + FIELDS_SET.add(TOTAL_FIELD); + FIELDS_SET.add(UPDATED_FIELD); + FIELDS_SET.add(CREATED_FIELD); + FIELDS_SET.add(DELETED_FIELD); + FIELDS_SET.add(BATCHES_FIELD); + FIELDS_SET.add(VERSION_CONFLICTS_FIELD); + FIELDS_SET.add(NOOPS_FIELD); + FIELDS_SET.add(RETRIES_FIELD); + // No need for inner level fields for retries in the set of outer level fields + FIELDS_SET.add(THROTTLED_RAW_FIELD); + FIELDS_SET.add(THROTTLED_HR_FIELD); + FIELDS_SET.add(REQUESTS_PER_SEC_FIELD); + FIELDS_SET.add(CANCELED_FIELD); + FIELDS_SET.add(THROTTLED_UNTIL_RAW_FIELD); + FIELDS_SET.add(THROTTLED_UNTIL_HR_FIELD); + FIELDS_SET.add(SLICES_FIELD); + } + + @SuppressWarnings("unchecked") + static ConstructingObjectParser, Void> RETRIES_PARSER = new ConstructingObjectParser<>( + "bulk_by_scroll_task_status_retries", + true, + a -> new Tuple(a[0], a[1]) + ); + static { + RETRIES_PARSER.declareLong(constructorArg(), new 
ParseField(RETRIES_BULK_FIELD)); + RETRIES_PARSER.declareLong(constructorArg(), new ParseField(RETRIES_SEARCH_FIELD)); + } + + public static void declareFields(ObjectParser parser) { + parser.declareInt(StatusBuilder::setSliceId, new ParseField(SLICE_ID_FIELD)); + parser.declareLong(StatusBuilder::setTotal, new ParseField(TOTAL_FIELD)); + parser.declareLong(StatusBuilder::setUpdated, new ParseField(UPDATED_FIELD)); + parser.declareLong(StatusBuilder::setCreated, new ParseField(CREATED_FIELD)); + parser.declareLong(StatusBuilder::setDeleted, new ParseField(DELETED_FIELD)); + parser.declareInt(StatusBuilder::setBatches, new ParseField(BATCHES_FIELD)); + parser.declareLong(StatusBuilder::setVersionConflicts, new ParseField(VERSION_CONFLICTS_FIELD)); + parser.declareLong(StatusBuilder::setNoops, new ParseField(NOOPS_FIELD)); + parser.declareObject(StatusBuilder::setRetries, RETRIES_PARSER, new ParseField(RETRIES_FIELD)); + parser.declareLong(StatusBuilder::setThrottled, new ParseField(THROTTLED_RAW_FIELD)); + parser.declareFloat(StatusBuilder::setRequestsPerSecond, new ParseField(REQUESTS_PER_SEC_FIELD)); + parser.declareString(StatusBuilder::setReasonCancelled, new ParseField(CANCELED_FIELD)); + parser.declareLong(StatusBuilder::setThrottledUntil, new ParseField(THROTTLED_UNTIL_RAW_FIELD)); + parser.declareObjectArray( + StatusBuilder::setSliceStatuses, (p, c) -> StatusOrException.fromXContent(p), new ParseField(SLICES_FIELD) + ); + } + private final Integer sliceId; private final long total; private final long updated; @@ -303,11 +503,7 @@ public Status(List sliceStatuses, @Nullable String reasonCanc } public Status(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - sliceId = in.readOptionalVInt(); - } else { - sliceId = null; - } + sliceId = in.readOptionalVInt(); total = in.readVLong(); updated = in.readVLong(); created = in.readVLong(); @@ -321,18 +517,12 @@ public Status(StreamInput in) throws IOException { requestsPerSecond = in.readFloat(); reasonCancelled = in.readOptionalString(); throttledUntil = in.readTimeValue(); - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - sliceStatuses = in.readList(stream -> stream.readOptionalWriteable(StatusOrException::new)); - } else { - sliceStatuses = emptyList(); - } + sliceStatuses = in.readList(stream -> stream.readOptionalWriteable(StatusOrException::new)); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeOptionalVInt(sliceId); - } + out.writeOptionalVInt(sliceId); out.writeVLong(total); out.writeVLong(updated); out.writeVLong(created); @@ -346,11 +536,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeFloat(requestsPerSecond); out.writeOptionalString(reasonCancelled); out.writeTimeValue(throttledUntil); - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeVInt(sliceStatuses.size()); - for (StatusOrException sliceStatus : sliceStatuses) { - out.writeOptionalWriteable(sliceStatus); - } + out.writeVInt(sliceStatuses.size()); + for (StatusOrException sliceStatus : sliceStatuses) { + out.writeOptionalWriteable(sliceStatus); } } @@ -366,35 +554,40 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder.endObject(); } + /** + * We need to write a manual parser for this because of {@link StatusOrException}. 
Since + * {@link StatusOrException#fromXContent(XContentParser)} tries to peek at a field first before deciding + * what needs to be it cannot use an {@link ObjectParser}. + */ public XContentBuilder innerXContent(XContentBuilder builder, Params params) throws IOException { if (sliceId != null) { - builder.field("slice_id", sliceId); + builder.field(SLICE_ID_FIELD, sliceId); } - builder.field("total", total); + builder.field(TOTAL_FIELD, total); if (params.paramAsBoolean(INCLUDE_UPDATED, true)) { - builder.field("updated", updated); + builder.field(UPDATED_FIELD, updated); } if (params.paramAsBoolean(INCLUDE_CREATED, true)) { - builder.field("created", created); + builder.field(CREATED_FIELD, created); } - builder.field("deleted", deleted); - builder.field("batches", batches); - builder.field("version_conflicts", versionConflicts); - builder.field("noops", noops); - builder.startObject("retries"); { - builder.field("bulk", bulkRetries); - builder.field("search", searchRetries); + builder.field(DELETED_FIELD, deleted); + builder.field(BATCHES_FIELD, batches); + builder.field(VERSION_CONFLICTS_FIELD, versionConflicts); + builder.field(NOOPS_FIELD, noops); + builder.startObject(RETRIES_FIELD); { + builder.field(RETRIES_BULK_FIELD, bulkRetries); + builder.field(RETRIES_SEARCH_FIELD, searchRetries); } builder.endObject(); - builder.humanReadableField("throttled_millis", "throttled", throttled); - builder.field("requests_per_second", requestsPerSecond == Float.POSITIVE_INFINITY ? -1 : requestsPerSecond); + builder.humanReadableField(THROTTLED_RAW_FIELD, THROTTLED_HR_FIELD, throttled); + builder.field(REQUESTS_PER_SEC_FIELD, requestsPerSecond == Float.POSITIVE_INFINITY ? -1 : requestsPerSecond); if (reasonCancelled != null) { - builder.field("canceled", reasonCancelled); + builder.field(CANCELED_FIELD, reasonCancelled); } - builder.humanReadableField("throttled_until_millis", "throttled_until", throttledUntil); + builder.humanReadableField(THROTTLED_UNTIL_RAW_FIELD, THROTTLED_UNTIL_HR_FIELD, throttledUntil); if (false == sliceStatuses.isEmpty()) { - builder.startArray("slices"); + builder.startArray(SLICES_FIELD); for (StatusOrException slice : sliceStatuses) { if (slice == null) { builder.nullValue(); @@ -407,6 +600,87 @@ public XContentBuilder innerXContent(XContentBuilder builder, Params params) return builder; } + public static Status fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + if (parser.currentToken() == Token.START_OBJECT) { + token = parser.nextToken(); + } else { + token = parser.nextToken(); + } + ensureExpectedToken(Token.START_OBJECT, token, parser::getTokenLocation); + token = parser.nextToken(); + ensureExpectedToken(Token.FIELD_NAME, token, parser::getTokenLocation); + return innerFromXContent(parser); + } + + public static Status innerFromXContent(XContentParser parser) throws IOException { + Token token = parser.currentToken(); + String fieldName = parser.currentName(); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); + StatusBuilder builder = new StatusBuilder(); + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token == Token.START_OBJECT) { + if (fieldName.equals(Status.RETRIES_FIELD)) { + builder.setRetries(Status.RETRIES_PARSER.parse(parser, null)); + } else { + parser.skipChildren(); + } + } else if (token == Token.START_ARRAY) { + if (fieldName.equals(Status.SLICES_FIELD)) { + while ((token = 
parser.nextToken()) != Token.END_ARRAY) { + builder.addToSliceStatuses(StatusOrException.fromXContent(parser)); + } + } else { + parser.skipChildren(); + } + } else { // else if it is a value + switch (fieldName) { + case Status.SLICE_ID_FIELD: + builder.setSliceId(parser.intValue()); + break; + case Status.TOTAL_FIELD: + builder.setTotal(parser.longValue()); + break; + case Status.UPDATED_FIELD: + builder.setUpdated(parser.longValue()); + break; + case Status.CREATED_FIELD: + builder.setCreated(parser.longValue()); + break; + case Status.DELETED_FIELD: + builder.setDeleted(parser.longValue()); + break; + case Status.BATCHES_FIELD: + builder.setBatches(parser.intValue()); + break; + case Status.VERSION_CONFLICTS_FIELD: + builder.setVersionConflicts(parser.longValue()); + break; + case Status.NOOPS_FIELD: + builder.setNoops(parser.longValue()); + break; + case Status.THROTTLED_RAW_FIELD: + builder.setThrottled(parser.longValue()); + break; + case Status.REQUESTS_PER_SEC_FIELD: + builder.setRequestsPerSecond(parser.floatValue()); + break; + case Status.CANCELED_FIELD: + builder.setReasonCancelled(parser.text()); + break; + case Status.THROTTLED_UNTIL_RAW_FIELD: + builder.setThrottledUntil(parser.longValue()); + break; + default: + break; + } + } + } + return builder.buildStatus(); + } + @Override public String toString() { StringBuilder builder = new StringBuilder(); @@ -533,6 +807,44 @@ public List getSliceStatuses() { return sliceStatuses; } + @Override + public int hashCode() { + return Objects.hash( + sliceId, total, updated, created, deleted, batches, versionConflicts, noops, searchRetries, + bulkRetries, throttled, requestsPerSecond, reasonCancelled, throttledUntil, sliceStatuses + ); + } + + public boolean equalsWithoutSliceStatus(Object o, boolean includeUpdated, boolean includeCreated) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Status other = (Status) o; + return + Objects.equals(sliceId, other.sliceId) && + total == other.total && + (!includeUpdated || updated == other.updated) && + (!includeCreated || created == other.created) && + deleted == other.deleted && + batches == other.batches && + versionConflicts == other.versionConflicts && + noops == other.noops && + searchRetries == other.searchRetries && + bulkRetries == other.bulkRetries && + Objects.equals(throttled, other.throttled) && + requestsPerSecond == other.requestsPerSecond && + Objects.equals(reasonCancelled, other.reasonCancelled) && + Objects.equals(throttledUntil, other.throttledUntil); + } + + @Override + public boolean equals(Object o) { + if (equalsWithoutSliceStatus(o, true, true)) { + return Objects.equals(sliceStatuses, ((Status) o).sliceStatuses); + } else { + return false; + } + } + private int checkPositive(int value, String name) { if (value < 0) { throw new IllegalArgumentException(name + " must be greater than 0 but was [" + value + "]"); @@ -556,6 +868,19 @@ public static class StatusOrException implements Writeable, ToXContentObject { private final Status status; private final Exception exception; + public static Set EXPECTED_EXCEPTION_FIELDS = new HashSet<>(); + static { + EXPECTED_EXCEPTION_FIELDS.add("type"); + EXPECTED_EXCEPTION_FIELDS.add("reason"); + EXPECTED_EXCEPTION_FIELDS.add("caused_by"); + EXPECTED_EXCEPTION_FIELDS.add("suppressed"); + EXPECTED_EXCEPTION_FIELDS.add("stack_trace"); + EXPECTED_EXCEPTION_FIELDS.add("header"); + EXPECTED_EXCEPTION_FIELDS.add("error"); + EXPECTED_EXCEPTION_FIELDS.add("root_cause"); + } + + public 
StatusOrException(Status status) { this.status = status; exception = null; @@ -610,6 +935,48 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + /** + * Since {@link StatusOrException} can contain either an {@link Exception} or a {@link Status} we need to peek + * at a field first before deciding what needs to be parsed since the same object could contains either. + * The {@link #EXPECTED_EXCEPTION_FIELDS} contains the fields that are expected when the serialised object + * was an instance of exception and the {@link Status#FIELDS_SET} is the set of fields expected when the + * serialized object was an instance of Status. + */ + public static StatusOrException fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + if (token == null) { + token = parser.nextToken(); + } + if (token == Token.VALUE_NULL) { + return null; + } else { + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation); + token = parser.nextToken(); + // This loop is present only to ignore unknown tokens. It breaks as soon as we find a field + // that is allowed. + while (token != Token.END_OBJECT) { + ensureExpectedToken(Token.FIELD_NAME, token, parser::getTokenLocation); + String fieldName = parser.currentName(); + // weird way to ignore unknown tokens + if (Status.FIELDS_SET.contains(fieldName)) { + return new StatusOrException( + Status.innerFromXContent(parser) + ); + } else if (EXPECTED_EXCEPTION_FIELDS.contains(fieldName)){ + return new StatusOrException(ElasticsearchException.innerFromXContent(parser, false)); + } else { + // Ignore unknown tokens + token = parser.nextToken(); + if (token == Token.START_OBJECT || token == Token.START_ARRAY) { + parser.skipChildren(); + } + token = parser.nextToken(); + } + } + throw new XContentParseException("Unable to parse StatusFromException. Expected fields not found."); + } + } + @Override public boolean equals(Object obj) { if (obj == null) { diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java index e45d039edaeae..52a1c89d4b3f5 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java @@ -26,6 +26,11 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.TaskId; import java.io.IOException; @@ -39,7 +44,8 @@ * of reasons, not least of which that scripts are allowed to change the destination request in drastic ways, including changing the index * to which documents are written. */ -public class ReindexRequest extends AbstractBulkIndexByScrollRequest implements CompositeIndicesRequest { +public class ReindexRequest extends AbstractBulkIndexByScrollRequest + implements CompositeIndicesRequest, ToXContentObject { /** * Prototype for index requests. 
*/ @@ -48,9 +54,10 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest 0) { + builder.field("size", getSize()); + } + if (getScript() != null) { + builder.field("script", getScript()); + } + if (isAbortOnVersionConflict() == false) { + builder.field("conflicts", "proceed"); + } + } + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java b/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java index 70f79a9def605..e255b4db34e35 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java @@ -26,6 +26,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.HashMap; @@ -35,7 +37,7 @@ import static java.util.Objects.requireNonNull; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; -public class RemoteInfo implements Writeable { +public class RemoteInfo implements Writeable, ToXContentObject { /** * Default {@link #socketTimeout} for requests that don't have one set. */ @@ -92,13 +94,8 @@ public RemoteInfo(StreamInput in) throws IOException { headers.put(in.readString(), in.readString()); } this.headers = unmodifiableMap(headers); - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - socketTimeout = in.readTimeValue(); - connectTimeout = in.readTimeValue(); - } else { - socketTimeout = DEFAULT_SOCKET_TIMEOUT; - connectTimeout = DEFAULT_CONNECT_TIMEOUT; - } + socketTimeout = in.readTimeValue(); + connectTimeout = in.readTimeValue(); if (in.getVersion().onOrAfter(Version.V_6_4_0)) { pathPrefix = in.readOptionalString(); } else { @@ -119,10 +116,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(header.getKey()); out.writeString(header.getValue()); } - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeTimeValue(socketTimeout); - out.writeTimeValue(connectTimeout); - } + out.writeTimeValue(socketTimeout); + out.writeTimeValue(connectTimeout); if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeOptionalString(pathPrefix); } @@ -197,4 +192,25 @@ public String toString() { } return b.toString(); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (username != null) { + builder.field("username", username); + } + if (password != null) { + builder.field("password", password); + } + builder.field("host", scheme + "://" + host + ":" + port + + (pathPrefix == null ? 
"" : "/" + pathPrefix)); + if (headers.size() >0 ) { + builder.field("headers", headers); + } + builder.field("socket_timeout", socketTimeout.getStringRep()); + builder.field("connect_timeout", connectTimeout.getStringRep()); + builder.field("query", query); + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java b/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java index 917b57a9c9745..a3901bb7a568b 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java @@ -284,6 +284,11 @@ public static class SearchFailure implements Writeable, ToXContentObject { @Nullable private final String nodeId; + public static final String INDEX_FIELD = "index"; + public static final String SHARD_FIELD = "shard"; + public static final String NODE_FIELD = "node"; + public static final String REASON_FIELD = "reason"; + public SearchFailure(Throwable reason, @Nullable String index, @Nullable Integer shardId, @Nullable String nodeId) { this.index = index; this.shardId = shardId; @@ -337,15 +342,15 @@ public String getNodeId() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); if (index != null) { - builder.field("index", index); + builder.field(INDEX_FIELD, index); } if (shardId != null) { - builder.field("shard", shardId); + builder.field(SHARD_FIELD, shardId); } if (nodeId != null) { - builder.field("node", nodeId); + builder.field(NODE_FIELD, nodeId); } - builder.field("reason"); + builder.field(REASON_FIELD); { builder.startObject(); ElasticsearchException.generateThrowableXContent(builder, params, reason); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java index eb4fd59a7bc5f..71ffadc930327 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java @@ -24,6 +24,9 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.tasks.TaskId; import java.io.IOException; @@ -34,16 +37,22 @@ * representative set of subrequests. This is best-effort but better than {@linkplain ReindexRequest} because scripts can't change the * destination index and things. */ -public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest implements IndicesRequest.Replaceable { +public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest + implements IndicesRequest.Replaceable, ToXContentObject { /** * Ingest pipeline to set on index requests made by this action. */ private String pipeline; public UpdateByQueryRequest() { + this(new SearchRequest()); } - public UpdateByQueryRequest(SearchRequest search) { + public UpdateByQueryRequest(String... 
indices) { + this(new SearchRequest(indices)); + } + + UpdateByQueryRequest(SearchRequest search) { this(search, true); } @@ -59,8 +68,81 @@ private UpdateByQueryRequest(SearchRequest search, boolean setDefaults) { /** * Set the ingest pipeline to set on index requests made by this action. */ - public void setPipeline(String pipeline) { + public UpdateByQueryRequest setPipeline(String pipeline) { this.pipeline = pipeline; + return this; + } + + /** + * Set the query for selective update + */ + public UpdateByQueryRequest setQuery(QueryBuilder query) { + if (query != null) { + getSearchRequest().source().query(query); + } + return this; + } + + /** + * Set the document types for the update + */ + public UpdateByQueryRequest setDocTypes(String... types) { + if (types != null) { + getSearchRequest().types(types); + } + return this; + } + + /** + * Set routing limiting the process to the shards that match that routing value + */ + public UpdateByQueryRequest setRouting(String routing) { + if (routing != null) { + getSearchRequest().routing(routing); + } + return this; + } + + /** + * The scroll size to control number of documents processed per batch + */ + public UpdateByQueryRequest setBatchSize(int size) { + getSearchRequest().source().size(size); + return this; + } + + /** + * Set the IndicesOptions for controlling unavailable indices + */ + public UpdateByQueryRequest setIndicesOptions(IndicesOptions indicesOptions) { + getSearchRequest().indicesOptions(indicesOptions); + return this; + } + + /** + * Gets the batch size for this request + */ + public int getBatchSize() { + return getSearchRequest().source().size(); + } + + /** + * Gets the routing value used for this request + */ + public String getRouting() { + return getSearchRequest().routing(); + } + + /** + * Gets the document types on which this request would be executed. Returns an empty array if all + * types are to be processed. 
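 * <p>
 * For illustration, a request built with the fluent setters above might look like the
 * following (index, type, query, and batch size values are hypothetical):
 * <pre>{@code
 * UpdateByQueryRequest request = new UpdateByQueryRequest("my-index")
 *         .setDocTypes("_doc")
 *         .setQuery(QueryBuilders.termQuery("user", "kimchy"))
 *         .setBatchSize(500);
 * String[] types = request.getDocTypes();   // ["_doc"]
 * }</pre>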
+ */ + public String[] getDocTypes() { + if (getSearchRequest().types() != null) { + return getSearchRequest().types(); + } else { + return new String[0]; + } } /** @@ -121,4 +203,16 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(pipeline); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (getScript() != null) { + builder.field("script"); + getScript().toXContent(builder, params); + } + getSearchRequest().source().innerToXContent(builder, params); + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 8b33f2df8b16c..89cebf38a4013 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -120,7 +120,7 @@ public Query parseGroup(Type type, String field, Float boostValue, Object value, private Query combineGrouped(List groupQuery) { if (groupQuery == null || groupQuery.isEmpty()) { - return new MatchNoDocsQuery("[multi_match] list of group queries was empty"); + return zeroTermsQuery(); } if (groupQuery.size() == 1) { return groupQuery.get(0); diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java index df96ff87ec256..d3bac583eac68 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java @@ -19,47 +19,21 @@ package org.elasticsearch.index.search; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.IpFieldMapper; -import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MetadataFieldMapper; -import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.QueryShardException; import java.util.Collection; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; /** * Helpers to extract and expand field names and boosts */ public final class QueryParserHelper { - // Mapping types the "all-ish" query can be executed against - // TODO: Fix the API so that we don't need a hardcoded list of types - private static final Set ALLOWED_QUERY_MAPPER_TYPES; - - static { - ALLOWED_QUERY_MAPPER_TYPES = new HashSet<>(); - ALLOWED_QUERY_MAPPER_TYPES.add(DateFieldMapper.CONTENT_TYPE); - ALLOWED_QUERY_MAPPER_TYPES.add(IpFieldMapper.CONTENT_TYPE); - ALLOWED_QUERY_MAPPER_TYPES.add(KeywordFieldMapper.CONTENT_TYPE); - for (NumberFieldMapper.NumberType nt : NumberFieldMapper.NumberType.values()) { - ALLOWED_QUERY_MAPPER_TYPES.add(nt.typeName()); - } - ALLOWED_QUERY_MAPPER_TYPES.add("scaled_float"); - 
ALLOWED_QUERY_MAPPER_TYPES.add(TextFieldMapper.CONTENT_TYPE); - } - private QueryParserHelper() {} /** @@ -85,22 +59,6 @@ public static Map parseFieldsAndWeights(List fields) { return fieldsAndWeights; } - /** - * Get a {@link FieldMapper} associated with a field name or null. - * @param mapperService The mapper service where to find the mapping. - * @param field The field name to search. - */ - public static Mapper getFieldMapper(MapperService mapperService, String field) { - DocumentMapper mapper = mapperService.documentMapper(); - if (mapper != null) { - Mapper fieldMapper = mapper.mappers().getMapper(field); - if (fieldMapper != null) { - return fieldMapper; - } - } - return null; - } - public static Map resolveMappingFields(QueryShardContext context, Map fieldsAndWeights) { return resolveMappingFields(context, fieldsAndWeights, null); @@ -138,8 +96,7 @@ public static Map resolveMappingFields(QueryShardContext context, * @param fieldOrPattern The field name or the pattern to resolve * @param weight The weight for the field * @param acceptAllTypes Whether all field type should be added when a pattern is expanded. - * If false, only {@link #ALLOWED_QUERY_MAPPER_TYPES} are accepted and other field types - * are discarded from the query. + * If false, only searchable field types are added. * @param acceptMetadataField Whether metadata fields should be added when a pattern is expanded. */ public static Map resolveMappingField(QueryShardContext context, String fieldOrPattern, float weight, @@ -154,8 +111,7 @@ public static Map resolveMappingField(QueryShardContext context, * @param fieldOrPattern The field name or the pattern to resolve * @param weight The weight for the field * @param acceptAllTypes Whether all field type should be added when a pattern is expanded. - * If false, only {@link #ALLOWED_QUERY_MAPPER_TYPES} are accepted and other field types - * are discarded from the query. + * If false, only searchable field types are added. * @param acceptMetadataField Whether metadata fields should be added when a pattern is expanded. * @param fieldSuffix The suffix name to add to the expanded field names if a mapping exists for that name. * The original name of the field is kept if adding the suffix to the field name does not point to a valid field @@ -177,18 +133,20 @@ public static Map resolveMappingField(QueryShardContext context, continue; } - // Ignore fields that are not in the allowed mapper types. Some - // types do not support term queries, and thus we cannot generate - // a special query for them. - String mappingType = fieldType.typeName(); - if (acceptAllTypes == false && ALLOWED_QUERY_MAPPER_TYPES.contains(mappingType) == false) { + if (acceptMetadataField == false && fieldType.name().startsWith("_")) { + // Ignore metadata fields continue; } - // Ignore metadata fields. - Mapper mapper = getFieldMapper(context.getMapperService(), fieldName); - if (acceptMetadataField == false && mapper instanceof MetadataFieldMapper) { - continue; + if (acceptAllTypes == false) { + try { + fieldType.termQuery("", context); + } catch (QueryShardException |UnsupportedOperationException e) { + // field type is never searchable with term queries (eg. 
geo point): ignore + continue; + } catch (IllegalArgumentException |ElasticsearchParseException e) { + // other exceptions are parsing errors or not indexed fields: keep + } } fields.put(fieldName, weight); } diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index 50406ed583487..fa2fd033bee0d 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -280,14 +280,14 @@ protected Query newMatchAllDocsQuery() { @Override public Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException { - if (quoted) { - return getFieldQuery(field, queryText, getPhraseSlop()); - } - if (field != null && EXISTS_FIELD.equals(field)) { return existsQuery(queryText); } + if (quoted) { + return getFieldQuery(field, queryText, getPhraseSlop()); + } + // Detects additional operators '<', '<=', '>', '>=' to handle range query with one side unbounded. // It is required to use a prefix field operator to enable the detection since they are not treated // as logical operator by the query parser (e.g. age:>=10). @@ -333,6 +333,10 @@ public Query getFieldQuery(String field, String queryText, boolean quoted) throw @Override protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException { + if (field != null && EXISTS_FIELD.equals(field)) { + return existsQuery(queryText); + } + Map fields = extractMultiFields(field, true); if (fields.isEmpty()) { return newUnmappedFieldQuery(field); @@ -347,6 +351,9 @@ protected Query getFieldQuery(String field, String queryText, int slop) throws P } queryBuilder.setPhraseSlop(slop); Query query = queryBuilder.parse(MultiMatchQueryBuilder.Type.PHRASE, fields, queryText, null); + if (query == null) { + return null; + } return applySlop(query, slop); } catch (IOException e) { throw new ParseException(e.getMessage()); diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 25073d7ce15a1..f1e7dec6995a0 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1314,7 +1314,7 @@ int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot) throws IOExce **/ public void openEngineAndRecoverFromTranslog() throws IOException { innerOpenEngineAndTranslog(); - getEngine().recoverFromTranslog(); + getEngine().recoverFromTranslog(Long.MAX_VALUE); } /** @@ -1332,7 +1332,7 @@ private void innerOpenEngineAndTranslog() throws IOException { } recoveryState.setStage(RecoveryState.Stage.VERIFY_INDEX); // also check here, before we apply the translog - if (Booleans.isTrue(checkIndexOnStartup)) { + if (Booleans.isTrue(checkIndexOnStartup) || "checksum".equals(checkIndexOnStartup)) { try { checkIndex(); } catch (IOException ex) { @@ -1635,15 +1635,6 @@ public Closeable acquireRetentionLockForPeerRecovery() { return getEngine().acquireRetentionLockForPeerRecovery(); } - /** - * Creates a new translog snapshot for reading translog operations whose seq# at least the provided seq#. - * The caller has to close the returned snapshot after finishing the reading. 
- */ - public Translog.Snapshot newTranslogSnapshotFromMinSeqNo(long minSeqNo) throws IOException { - // TODO: Remove this method after primary-replica resync use soft-deletes - return getEngine().newSnapshotFromMinSeqNo(minSeqNo); - } - /** * Returns the estimated number of history operations whose seq# at least the provided seq# in this shard. */ @@ -1668,19 +1659,18 @@ public boolean hasCompleteHistoryOperations(String source, long startingSeqNo) t } /** - * Creates a new "translog" snapshot from Lucene for reading operations whose seqno is between minSeqNo and maxSeqNo. - * The caller has to close the returned snapshot after finishing the reading. + * Creates a new changes snapshot for reading operations whose seq_no are between {@code fromSeqNo}(inclusive) + * and {@code toSeqNo}(inclusive). The caller has to close the returned snapshot after finishing the reading. * * @param source the source of the request - * @param minSeqNo the min_seqno to read - inclusive - * @param maxSeqNo the max_seqno to read - inclusive - * @param requiredFullRange if true then {@link Translog.Snapshot#next()} will throw {@link IllegalStateException} - * if any operation between minSeqNo and maxSeqNo is missing. This parameter should be only - * enabled when the requesting range is below the global checkpoint. + * @param fromSeqNo the from seq_no (inclusive) to read + * @param toSeqNo the to seq_no (inclusive) to read + * @param requiredFullRange if {@code true} then {@link Translog.Snapshot#next()} will throw {@link IllegalStateException} + * if any operation between {@code fromSeqNo} and {@code toSeqNo} is missing. + * This parameter should be only enabled when the entire requesting range is below the global checkpoint. */ - public Translog.Snapshot newLuceneChangesSnapshot(String source, long minSeqNo, long maxSeqNo, - boolean requiredFullRange) throws IOException { - return getEngine().newLuceneChangesSnapshot(source, mapperService, minSeqNo, maxSeqNo, requiredFullRange); + public Translog.Snapshot newChangesSnapshot(String source, long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException { + return getEngine().newChangesSnapshot(source, mapperService, fromSeqNo, toSeqNo, requiredFullRange); } public List segments(boolean verbose) { @@ -1965,6 +1955,9 @@ void checkIndex() throws IOException { if (store.tryIncRef()) { try { doCheckIndex(); + } catch (IOException e) { + store.markStoreCorrupted(e); + throw e; } finally { store.decRef(); } @@ -2008,18 +2001,7 @@ private void doCheckIndex() throws IOException { return; } logger.warn("check index [failure]\n{}", os.bytes().utf8ToString()); - if ("fix".equals(checkIndexOnStartup)) { - if (logger.isDebugEnabled()) { - logger.debug("fixing index, writing new segments file ..."); - } - store.exorciseIndex(status); - if (logger.isDebugEnabled()) { - logger.debug("index fixed, wrote new segments file \"{}\"", status.segmentsFileName); - } - } else { - // only throw a failure if we are not going to fix the index - throw new IllegalStateException("index check failure but can't fix it"); - } + throw new IOException("index check failure"); } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index 909039cea4b76..016a8afff6964 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -89,8 +89,7 @@ public void 
resync(final IndexShard indexShard, final ActionListener // Wrap translog snapshot to make it synchronized as it is accessed by different threads through SnapshotSender. // Even though those calls are not concurrent, snapshot.next() uses non-synchronized state and is not multi-thread-compatible // Also fail the resync early if the shard is shutting down - // TODO: A follow-up to make resync using soft-deletes - snapshot = indexShard.newTranslogSnapshotFromMinSeqNo(startingSeqNo); + snapshot = indexShard.getHistoryOperations("resync", startingSeqNo); final Translog.Snapshot originalSnapshot = snapshot; final Translog.Snapshot wrappedSnapshot = new Translog.Snapshot() { @Override diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index f809d4d5227e6..ae3f90e63e7d1 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -156,7 +156,7 @@ void addIndices(final RecoveryState.Index indexRecoveryStats, final Directory ta final Directory hardLinkOrCopyTarget = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target); IndexWriterConfig iwc = new IndexWriterConfig(null) - .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setCommitOnClose(false) // we don't want merges to happen here - we call maybe merge on the engine // later once we stared it up otherwise we would need to wait for it here diff --git a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index fc60543006648..f95cdb3a9f692 100644 --- a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.store; import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.FileSwitchDirectory; import org.apache.lucene.store.LockFactory; import org.apache.lucene.store.MMapDirectory; @@ -77,10 +76,21 @@ public Directory newDirectory() throws IOException { } protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - final String storeType = indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), - IndexModule.Type.FS.getSettingsKey()); + final String storeType = + indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()); if (IndexModule.Type.FS.match(storeType)) { - return FSDirectory.open(location, lockFactory); // use lucene defaults + final IndexModule.Type type = + IndexModule.defaultStoreType(IndexModule.NODE_STORE_ALLOW_MMAPFS.get(indexSettings.getNodeSettings())); + switch (type) { + case MMAPFS: + return new MMapDirectory(location, lockFactory); + case SIMPLEFS: + return new SimpleFSDirectory(location, lockFactory); + case NIOFS: + return new NIOFSDirectory(location, lockFactory); + default: + throw new AssertionError("unexpected built-in store type [" + type + "]"); + } } else if (IndexModule.Type.SIMPLEFS.match(storeType)) { return new SimpleFSDirectory(location, lockFactory); } else if (IndexModule.Type.NIOFS.match(storeType)) { diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java 
b/server/src/main/java/org/elasticsearch/index/store/Store.java index 68b672de1ee25..a00d779ca7712 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -134,7 +134,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref static final int VERSION_STACK_TRACE = 1; // we write the stack trace too since 1.4.0 static final int VERSION_START = 0; static final int VERSION = VERSION_WRITE_THROWABLE; - static final String CORRUPTED = "corrupted_"; + // public is for test purposes + public static final String CORRUPTED = "corrupted_"; public static final Setting INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.store.stats_refresh_interval", TimeValue.timeValueSeconds(10), Property.IndexScope); @@ -360,18 +361,6 @@ public CheckIndex.Status checkIndex(PrintStream out) throws IOException { } } - /** - * Repairs the index using the previous returned status from {@link #checkIndex(PrintStream)}. - */ - public void exorciseIndex(CheckIndex.Status status) throws IOException { - metadataLock.writeLock().lock(); - try (CheckIndex checkIndex = new CheckIndex(directory)) { - checkIndex.exorciseIndex(status); - } finally { - metadataLock.writeLock().unlock(); - } - } - public StoreStats stats() throws IOException { ensureOpen(); return new StoreStats(directory.estimateSize()); @@ -1594,7 +1583,7 @@ private static IndexWriter newIndexWriter(final IndexWriterConfig.OpenMode openM throws IOException { assert openMode == IndexWriterConfig.OpenMode.APPEND || commit == null : "can't specify create flag with a commit"; IndexWriterConfig iwc = new IndexWriterConfig(null) - .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setCommitOnClose(false) .setIndexCommit(commit) // we don't want merges to happen here - we call maybe merge on the engine diff --git a/server/src/main/java/org/elasticsearch/index/translog/TragicExceptionHolder.java b/server/src/main/java/org/elasticsearch/index/translog/TragicExceptionHolder.java new file mode 100644 index 0000000000000..b823a920039b5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/translog/TragicExceptionHolder.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.translog; + +import java.util.concurrent.atomic.AtomicReference; + +public class TragicExceptionHolder { + private final AtomicReference tragedy = new AtomicReference<>(); + + /** + * Sets the tragic exception or if the tragic exception is already set adds passed exception as suppressed exception + * @param ex tragic exception to set + */ + public void setTragicException(Exception ex) { + assert ex != null; + if (tragedy.compareAndSet(null, ex) == false) { + if (tragedy.get() != ex) { // to ensure there is no self-suppression + tragedy.get().addSuppressed(ex); + } + } + } + + public Exception get() { + return tragedy.get(); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 279bdb1904643..f17acac37896d 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -66,6 +66,7 @@ import java.util.function.LongSupplier; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; import java.util.stream.Stream; /** @@ -117,6 +118,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private final Path location; private TranslogWriter current; + protected final TragicExceptionHolder tragedy = new TragicExceptionHolder(); private final AtomicBoolean closed = new AtomicBoolean(); private final TranslogConfig config; private final LongSupplier globalCheckpointSupplier; @@ -310,8 +312,28 @@ public boolean isOpen() { return closed.get() == false; } + private static boolean calledFromOutsideOrViaTragedyClose() { + List frames = Stream.of(Thread.currentThread().getStackTrace()). + skip(3). //skip getStackTrace, current method and close method frames + limit(10). //limit depth of analysis to 10 frames, it should be enough to catch closing with, e.g. IOUtils + filter(f -> + { + try { + return Translog.class.isAssignableFrom(Class.forName(f.getClassName())); + } catch (Exception ignored) { + return false; + } + } + ). 
//find all inner callers including Translog subclasses + collect(Collectors.toList()); + //the list of inner callers should be either empty or should contain closeOnTragicEvent method + return frames.isEmpty() || frames.stream().anyMatch(f -> f.getMethodName().equals("closeOnTragicEvent")); + } + @Override public void close() throws IOException { + assert calledFromOutsideOrViaTragedyClose() : + "Translog.close method is called from inside Translog, but not via closeOnTragicEvent method"; if (closed.compareAndSet(false, true)) { try (ReleasableLock lock = writeLock.acquire()) { try { @@ -462,7 +484,7 @@ TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, lon getChannelFactory(), config.getBufferSize(), initialMinTranslogGen, initialGlobalCheckpoint, - globalCheckpointSupplier, this::getMinFileGeneration, primaryTermSupplier.getAsLong()); + globalCheckpointSupplier, this::getMinFileGeneration, primaryTermSupplier.getAsLong(), tragedy); } catch (final IOException e) { throw new TranslogException(shardId, "failed to create new translog file", e); } @@ -555,21 +577,27 @@ public long getLastSyncedGlobalCheckpoint() { */ public Snapshot newSnapshot() throws IOException { try (ReleasableLock ignored = readLock.acquire()) { - return newSnapshotFromGen(getMinFileGeneration()); + return newSnapshotFromGen(new TranslogGeneration(translogUUID, getMinFileGeneration()), Long.MAX_VALUE); } } - public Snapshot newSnapshotFromGen(long minGeneration) throws IOException { + public Snapshot newSnapshotFromGen(TranslogGeneration fromGeneration, long upToSeqNo) throws IOException { try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); - if (minGeneration < getMinFileGeneration()) { - throw new IllegalArgumentException("requested snapshot generation [" + minGeneration + "] is not available. " + + final long fromFileGen = fromGeneration.translogFileGeneration; + if (fromFileGen < getMinFileGeneration()) { + throw new IllegalArgumentException("requested snapshot generation [" + fromFileGen + "] is not available. 
" + "Min referenced generation is [" + getMinFileGeneration() + "]"); } TranslogSnapshot[] snapshots = Stream.concat(readers.stream(), Stream.of(current)) - .filter(reader -> reader.getGeneration() >= minGeneration) + .filter(reader -> reader.getGeneration() >= fromFileGen && reader.getCheckpoint().minSeqNo <= upToSeqNo) .map(BaseTranslogReader::newSnapshot).toArray(TranslogSnapshot[]::new); - return newMultiSnapshot(snapshots); + final Snapshot snapshot = newMultiSnapshot(snapshots); + if (upToSeqNo == Long.MAX_VALUE) { + return snapshot; + } else { + return new SeqNoFilterSnapshot(snapshot, Long.MIN_VALUE, upToSeqNo); + } } } @@ -726,7 +754,8 @@ public void trimOperations(long belowTerm, long aboveSeqNo) throws IOException { } } catch (IOException e) { IOUtils.closeWhileHandlingException(newReaders); - close(); + tragedy.setTragicException(e); + closeOnTragicEvent(e); throw e; } @@ -779,10 +808,10 @@ public boolean ensureSynced(Stream locations) throws IOException { * * @param ex if an exception occurs closing the translog, it will be suppressed into the provided exception */ - private void closeOnTragicEvent(final Exception ex) { + protected void closeOnTragicEvent(final Exception ex) { // we can not hold a read lock here because closing will attempt to obtain a write lock and that would result in self-deadlock assert readLock.isHeldByCurrentThread() == false : Thread.currentThread().getName(); - if (current.getTragicException() != null) { + if (tragedy.get() != null) { try { close(); } catch (final AlreadyClosedException inner) { @@ -903,7 +932,59 @@ default int overriddenOperations() { * Returns the next operation in the snapshot or null if we reached the end. */ Translog.Operation next() throws IOException; + } + /** + * A filtered snapshot consisting of only operations whose sequence numbers are in the given range + * between {@code fromSeqNo} (inclusive) and {@code toSeqNo} (inclusive). This filtered snapshot + * shares the same underlying resources with the {@code delegate} snapshot, therefore we should not + * use the {@code delegate} after passing it to this filtered snapshot. 
+ */ + static final class SeqNoFilterSnapshot implements Snapshot { + private final Snapshot delegate; + private int filteredOpsCount; + private final long fromSeqNo; // inclusive + private final long toSeqNo; // inclusive + + SeqNoFilterSnapshot(Snapshot delegate, long fromSeqNo, long toSeqNo) { + assert fromSeqNo <= toSeqNo : "from_seq_no[" + fromSeqNo + "] > to_seq_no[" + toSeqNo + "]"; + this.delegate = delegate; + this.fromSeqNo = fromSeqNo; + this.toSeqNo = toSeqNo; + } + + @Override + public int totalOperations() { + return delegate.totalOperations(); + } + + @Override + public int skippedOperations() { + return filteredOpsCount + delegate.skippedOperations(); + } + + @Override + public int overriddenOperations() { + return delegate.overriddenOperations(); + } + + @Override + public Operation next() throws IOException { + Translog.Operation op; + while ((op = delegate.next()) != null) { + if (fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo) { + return op; + } else { + filteredOpsCount++; + } + } + return null; + } + + @Override + public void close() throws IOException { + delegate.close(); + } } /** @@ -1559,7 +1640,8 @@ public void rollGeneration() throws IOException { current = createWriter(current.getGeneration() + 1); logger.trace("current translog set to [{}]", current.getGeneration()); } catch (final Exception e) { - IOUtils.closeWhileHandlingException(this); // tragic event + tragedy.setTragicException(e); + closeOnTragicEvent(e); throw e; } } @@ -1672,7 +1754,7 @@ long getFirstOperationPosition() { // for testing private void ensureOpen() { if (closed.get()) { - throw new AlreadyClosedException("translog is already closed", current.getTragicException()); + throw new AlreadyClosedException("translog is already closed", tragedy.get()); } } @@ -1686,7 +1768,7 @@ ChannelFactory getChannelFactory() { * Otherwise (no tragic exception has occurred) it returns null. 
*/ public Exception getTragicException() { - return current.getTragicException(); + return tragedy.get(); } /** Reads and returns the current checkpoint */ @@ -1769,8 +1851,8 @@ static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, S final String translogUUID = UUIDs.randomBase64UUID(); TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1, location.resolve(getFilename(1)), channelFactory, new ByteSizeValue(10), 1, initialGlobalCheckpoint, - () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm - ); + () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm, + new TragicExceptionHolder()); writer.close(); return translogUUID; } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 336cd8f5eeab7..e0cfe9eaaff0b 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -52,7 +52,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { /* the number of translog operations written to this file */ private volatile int operationCounter; /* if we hit an exception that we can't recover from we assign it to this var and ship it with every AlreadyClosedException we throw */ - private volatile Exception tragedy; + private final TragicExceptionHolder tragedy; /* A buffered outputstream what writes to the writers channel */ private final OutputStream outputStream; /* the total offset of this file including the bytes written to the file as well as into the buffer */ @@ -77,7 +77,10 @@ private TranslogWriter( final FileChannel channel, final Path path, final ByteSizeValue bufferSize, - final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier, TranslogHeader header) throws IOException { + final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier, TranslogHeader header, + TragicExceptionHolder tragedy) + throws + IOException { super(initialCheckpoint.generation, channel, path, header); assert initialCheckpoint.offset == channel.position() : "initial checkpoint offset [" + initialCheckpoint.offset + "] is different than current channel position [" @@ -95,12 +98,13 @@ private TranslogWriter( assert initialCheckpoint.trimmedAboveSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO : initialCheckpoint.trimmedAboveSeqNo; this.globalCheckpointSupplier = globalCheckpointSupplier; this.seenSequenceNumbers = Assertions.ENABLED ? 
new HashMap<>() : null; + this.tragedy = tragedy; } public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, ChannelFactory channelFactory, ByteSizeValue bufferSize, final long initialMinTranslogGen, long initialGlobalCheckpoint, final LongSupplier globalCheckpointSupplier, final LongSupplier minTranslogGenerationSupplier, - final long primaryTerm) + final long primaryTerm, TragicExceptionHolder tragedy) throws IOException { final FileChannel channel = channelFactory.open(file); try { @@ -121,7 +125,7 @@ public static TranslogWriter create(ShardId shardId, String translogUUID, long f writerGlobalCheckpointSupplier = globalCheckpointSupplier; } return new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize, - writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header); + writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header, tragedy); } catch (Exception exception) { // if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that // file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition @@ -130,24 +134,8 @@ public static TranslogWriter create(ShardId shardId, String translogUUID, long f } } - /** - * If this {@code TranslogWriter} was closed as a side-effect of a tragic exception, - * e.g. disk full while flushing a new segment, this returns the root cause exception. - * Otherwise (no tragic exception has occurred) it returns null. - */ - public Exception getTragicException() { - return tragedy; - } - private synchronized void closeWithTragicEvent(final Exception ex) { - assert ex != null; - if (tragedy == null) { - tragedy = ex; - } else if (tragedy != ex) { - // it should be safe to call closeWithTragicEvents on multiple layers without - // worrying about self suppression. - tragedy.addSuppressed(ex); - } + tragedy.setTragicException(ex); try { close(); } catch (final IOException | RuntimeException e) { @@ -206,7 +194,6 @@ private synchronized boolean assertNoSeqNumberConflict(long seqNo, BytesReferenc Translog.Operation prvOp = Translog.readOperation( new BufferedChecksumStreamInput(previous.v1().streamInput(), "assertion")); // TODO: We haven't had timestamp for Index operations in Lucene yet, we need to loosen this check without timestamp. 
- // We don't store versionType in Lucene index, we need to exclude it from this check final boolean sameOp; if (newOp instanceof Translog.Index && prvOp instanceof Translog.Index) { final Translog.Index o1 = (Translog.Index) prvOp; @@ -315,7 +302,8 @@ public TranslogReader closeIntoReader() throws IOException { if (closed.compareAndSet(false, true)) { return new TranslogReader(getLastSyncedCheckpoint(), channel, path, header); } else { - throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "]", tragedy); + throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "]", + tragedy.get()); } } } @@ -425,7 +413,7 @@ Checkpoint getLastSyncedCheckpoint() { protected final void ensureOpen() { if (isClosed()) { - throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed", tragedy); + throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed", tragedy.get()); } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java index 8239593c2a528..a90f8af0af42c 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java @@ -178,7 +178,7 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th terminal.println("Marking index with the new history uuid"); // commit the new history id IndexWriterConfig iwc = new IndexWriterConfig(null) - .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setCommitOnClose(false) // we don't want merges to happen here - we call maybe merge on the engine // later once we started it up, otherwise we would need to wait for it here diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 39346fecbef25..1c83a880511cd 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -228,12 +228,20 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, indicesRequestCache, logger, threadPool, this.cleanInterval); this.metaStateService = metaStateService; this.engineFactoryProviders = engineFactoryProviders; + + // do not allow any plugin-provided index store type to conflict with a built-in type + for (final String indexStoreType : indexStoreFactories.keySet()) { + if (IndexModule.isBuiltinType(indexStoreType)) { + throw new IllegalStateException("registered index store type [" + indexStoreType + "] conflicts with a built-in type"); + } + } + this.indexStoreFactories = indexStoreFactories; } @Override protected void doStop() { - ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory("indices_shutdown")); + ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory(settings, "indices_shutdown")); // Copy indices because we modify it asynchronously in the body of the loop final Set indices = this.indices.values().stream().map(s -> s.index()).collect(Collectors.toSet()); diff --git
a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 7e6a9c29a8344..3d05293f7b719 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -251,7 +251,16 @@ private ParentMemoryUsage parentUsed(long newBytesReserved) { //package private to allow overriding it in tests long currentMemoryUsage() { - return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed(); + try { + return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed(); + } catch (IllegalArgumentException ex) { + // This exception can happen (rarely) due to a race condition in the JVM when determining usage of memory pools. We do not want + // to fail requests because of this and thus return zero memory usage in this case. While we could also return the most + // recently determined memory usage, we would overestimate memory usage immediately after a garbage collection event. + assert ex.getMessage().matches("committed = \\d+ should be < max = \\d+"); + logger.info("Cannot determine current memory usage due to JDK-8207200.", ex); + return 0; + } } /** diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index e6a86d47f55c0..692010119dc2d 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -456,7 +456,7 @@ private void createIndices(final ClusterState state) { AllocatedIndex indexService = null; try { indexService = indicesService.createIndex(indexMetaData, buildInIndexListener); - if (indexService.updateMapping(indexMetaData) && sendRefreshMapping) { + if (indexService.updateMapping(null, indexMetaData) && sendRefreshMapping) { nodeMappingRefreshAction.nodeMappingRefresh(state.nodes().getMasterNode(), new NodeMappingRefreshAction.NodeMappingRefreshRequest(indexMetaData.getIndex().getName(), indexMetaData.getIndexUUID(), state.nodes().getLocalNodeId()) @@ -490,7 +490,7 @@ private void updateIndices(ClusterChangedEvent event) { if (ClusterChangedEvent.indexMetaDataChanged(currentIndexMetaData, newIndexMetaData)) { indexService.updateMetaData(newIndexMetaData); try { - if (indexService.updateMapping(newIndexMetaData) && sendRefreshMapping) { + if (indexService.updateMapping(currentIndexMetaData, newIndexMetaData) && sendRefreshMapping) { nodeMappingRefreshAction.nodeMappingRefresh(state.nodes().getMasterNode(), new NodeMappingRefreshAction.NodeMappingRefreshRequest(newIndexMetaData.getIndex().getName(), newIndexMetaData.getIndexUUID(), state.nodes().getLocalNodeId()) @@ -778,7 +778,7 @@ public interface AllocatedIndex extends Iterable, IndexCompo /** * Checks if index requires refresh from master. */ - boolean updateMapping(IndexMetaData indexMetaData) throws IOException; + boolean updateMapping(IndexMetaData currentIndexMetaData, IndexMetaData newIndexMetaData) throws IOException; /** * Returns shard with given id. 
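For reference, the fallback added to HierarchyCircuitBreakerService.currentMemoryUsage() above can be exercised in isolation with nothing but the JDK management API; the following is a minimal sketch under that assumption (the class and method names are illustrative and not part of this change):

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;

// Minimal sketch: read heap usage, but treat the transient IllegalArgumentException
// described in JDK-8207200 ("committed = X should be < max = Y") as "unknown" and
// report zero instead of failing the caller, mirroring the patched method above.
final class HeapUsageProbe {
    private static final MemoryMXBean MEMORY_MX_BEAN = ManagementFactory.getMemoryMXBean();

    static long currentHeapUsedBytes() {
        try {
            return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed();
        } catch (IllegalArgumentException ex) {
            // rare JVM race while sizing memory pools; zero is a deliberate underestimate
            return 0L;
        }
    }
}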
diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index f01b4bb312174..fb7885a217e01 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -560,9 +560,6 @@ static final class PreSyncedFlushResponse extends TransportResponse { } boolean includeNumDocs(Version version) { - if (version.major == Version.V_5_6_8.major) { - return version.onOrAfter(Version.V_5_6_8); - } return version.onOrAfter(Version.V_6_2_2); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 3ed53f6c3e118..10f796e5e1551 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -162,12 +162,13 @@ public RecoveryResponse recoverToTarget() throws IOException { } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "snapshot failed", e); } - // we set this to 0 to create a translog roughly according to the retention policy - // on the target. Note that it will still filter out legacy operations with no sequence numbers - startingSeqNo = 0; //TODO: A follow-up to send only ops above the local checkpoint if soft-deletes enabled. - // but we must have everything above the local checkpoint in the commit + // We must have everything above the local checkpoint in the commit requiredSeqNoRangeStart = Long.parseLong(phase1Snapshot.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1; + // If soft-deletes enabled, we need to transfer only operations after the local_checkpoint of the commit to have + // the same history on the target. However, with translog, we need to set this to 0 to create a translog roughly + // according to the retention policy on the target. Note that it will still filter out legacy operations without seqNo. + startingSeqNo = shard.indexSettings().isSoftDeleteEnabled() ? requiredSeqNoRangeStart : 0; try { final int estimateNumOps = shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo); phase1(phase1Snapshot.getIndexCommit(), () -> estimateNumOps); diff --git a/server/src/main/java/org/elasticsearch/ingest/ConditionalProcessor.java b/server/src/main/java/org/elasticsearch/ingest/ConditionalProcessor.java new file mode 100644 index 0000000000000..d1eb651acae03 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/ingest/ConditionalProcessor.java @@ -0,0 +1,381 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import org.elasticsearch.script.IngestConditionalScript; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; + +public class ConditionalProcessor extends AbstractProcessor { + + static final String TYPE = "conditional"; + + private final Script condition; + + private final ScriptService scriptService; + + private final Processor processor; + + ConditionalProcessor(String tag, Script script, ScriptService scriptService, Processor processor) { + super(tag); + this.condition = script; + this.scriptService = scriptService; + this.processor = processor; + } + + @Override + public void execute(IngestDocument ingestDocument) throws Exception { + IngestConditionalScript script = + scriptService.compile(condition, IngestConditionalScript.CONTEXT).newInstance(condition.getParams()); + if (script.execute(new UnmodifiableIngestData(ingestDocument.getSourceAndMetadata()))) { + processor.execute(ingestDocument); + } + } + + @Override + public String getType() { + return TYPE; + } + + private static Object wrapUnmodifiable(Object raw) { + // Wraps all mutable types that the JSON parser can create by immutable wrappers. + // Any inputs not wrapped are assumed to be immutable + if (raw instanceof Map) { + return new UnmodifiableIngestData((Map) raw); + } else if (raw instanceof List) { + return new UnmodifiableIngestList((List) raw); + } else if (raw instanceof byte[]) { + return ((byte[]) raw).clone(); + } + return raw; + } + + private static UnsupportedOperationException unmodifiableException() { + return new UnsupportedOperationException("Mutating ingest documents in conditionals is not supported"); + } + + private static final class UnmodifiableIngestData implements Map { + + private final Map data; + + UnmodifiableIngestData(Map data) { + this.data = data; + } + + @Override + public int size() { + return data.size(); + } + + @Override + public boolean isEmpty() { + return data.isEmpty(); + } + + @Override + public boolean containsKey(final Object key) { + return data.containsKey(key); + } + + @Override + public boolean containsValue(final Object value) { + return data.containsValue(value); + } + + @Override + public Object get(final Object key) { + return wrapUnmodifiable(data.get(key)); + } + + @Override + public Object put(final String key, final Object value) { + throw unmodifiableException(); + } + + @Override + public Object remove(final Object key) { + throw unmodifiableException(); + } + + @Override + public void putAll(final Map m) { + throw unmodifiableException(); + } + + @Override + public void clear() { + throw unmodifiableException(); + } + + @Override + public Set keySet() { + return Collections.unmodifiableSet(data.keySet()); + } + + @Override + public Collection values() { + return new UnmodifiableIngestList(new ArrayList<>(data.values())); + } + + @Override + public Set> entrySet() { + return data.entrySet().stream().map(entry -> + new Entry() { + @Override + public String getKey() { + return entry.getKey(); + } + + @Override + public Object getValue() { + return wrapUnmodifiable(entry.getValue()); + } + + @Override + 
public Object setValue(final Object value) { + throw unmodifiableException(); + } + + @Override + public boolean equals(final Object o) { + return entry.equals(o); + } + + @Override + public int hashCode() { + return entry.hashCode(); + } + }).collect(Collectors.toSet()); + } + } + + private static final class UnmodifiableIngestList implements List { + + private final List data; + + UnmodifiableIngestList(List data) { + this.data = data; + } + + @Override + public int size() { + return data.size(); + } + + @Override + public boolean isEmpty() { + return data.isEmpty(); + } + + @Override + public boolean contains(final Object o) { + return data.contains(o); + } + + @Override + public Iterator iterator() { + Iterator wrapped = data.iterator(); + return new Iterator() { + @Override + public boolean hasNext() { + return wrapped.hasNext(); + } + + @Override + public Object next() { + return wrapped.next(); + } + + @Override + public void remove() { + throw unmodifiableException(); + } + }; + } + + @Override + public Object[] toArray() { + Object[] wrapped = data.toArray(new Object[0]); + for (int i = 0; i < wrapped.length; i++) { + wrapped[i] = wrapUnmodifiable(wrapped[i]); + } + return wrapped; + } + + @Override + public T[] toArray(final T[] a) { + Object[] raw = data.toArray(new Object[0]); + T[] wrapped = (T[]) Arrays.copyOf(raw, a.length, a.getClass()); + for (int i = 0; i < wrapped.length; i++) { + wrapped[i] = (T) wrapUnmodifiable(wrapped[i]); + } + return wrapped; + } + + @Override + public boolean add(final Object o) { + throw unmodifiableException(); + } + + @Override + public boolean remove(final Object o) { + throw unmodifiableException(); + } + + @Override + public boolean containsAll(final Collection c) { + return data.containsAll(c); + } + + @Override + public boolean addAll(final Collection c) { + throw unmodifiableException(); + } + + @Override + public boolean addAll(final int index, final Collection c) { + throw unmodifiableException(); + } + + @Override + public boolean removeAll(final Collection c) { + throw unmodifiableException(); + } + + @Override + public boolean retainAll(final Collection c) { + throw unmodifiableException(); + } + + @Override + public void clear() { + throw unmodifiableException(); + } + + @Override + public Object get(final int index) { + return wrapUnmodifiable(data.get(index)); + } + + @Override + public Object set(final int index, final Object element) { + throw unmodifiableException(); + } + + @Override + public void add(final int index, final Object element) { + throw unmodifiableException(); + } + + @Override + public Object remove(final int index) { + throw unmodifiableException(); + } + + @Override + public int indexOf(final Object o) { + return data.indexOf(o); + } + + @Override + public int lastIndexOf(final Object o) { + return data.lastIndexOf(o); + } + + @Override + public ListIterator listIterator() { + return new UnmodifiableListIterator(data.listIterator()); + } + + @Override + public ListIterator listIterator(final int index) { + return new UnmodifiableListIterator(data.listIterator(index)); + } + + @Override + public List subList(final int fromIndex, final int toIndex) { + return new UnmodifiableIngestList(data.subList(fromIndex, toIndex)); + } + + private static final class UnmodifiableListIterator implements ListIterator { + + private final ListIterator data; + + UnmodifiableListIterator(ListIterator data) { + this.data = data; + } + + @Override + public boolean hasNext() { + return data.hasNext(); + } + + @Override + public Object
next() { + return wrapUnmodifiable(data.next()); + } + + @Override + public boolean hasPrevious() { + return data.hasPrevious(); + } + + @Override + public Object previous() { + return wrapUnmodifiable(data.previous()); + } + + @Override + public int nextIndex() { + return data.nextIndex(); + } + + @Override + public int previousIndex() { + return data.previousIndex(); + } + + @Override + public void remove() { + throw unmodifiableException(); + } + + @Override + public void set(final Object o) { + throw unmodifiableException(); + } + + @Override + public void add(final Object o) { + throw unmodifiableException(); + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java index 54d06d116552f..d4f27f47eb8f2 100644 --- a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java +++ b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java @@ -19,9 +19,18 @@ package org.elasticsearch.ingest; +import java.io.IOException; +import java.io.InputStream; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; @@ -296,6 +305,7 @@ public static ElasticsearchException newConfigurationException(String processorT } public static List readProcessorConfigs(List> processorConfigs, + ScriptService scriptService, Map processorFactories) throws Exception { Exception exception = null; List processors = new ArrayList<>(); @@ -303,7 +313,7 @@ public static List readProcessorConfigs(List> pro for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { try { - processors.add(readProcessor(processorFactories, entry.getKey(), entry.getValue())); + processors.add(readProcessor(processorFactories, scriptService, entry.getKey(), entry.getValue())); } catch (Exception e) { exception = ExceptionsHelper.useOrSuppress(exception, e); } @@ -356,13 +366,14 @@ private static void addMetadataToException(ElasticsearchException exception, Str @SuppressWarnings("unchecked") public static Processor readProcessor(Map processorFactories, + ScriptService scriptService, String type, Object config) throws Exception { if (config instanceof Map) { - return readProcessor(processorFactories, type, (Map) config); + return readProcessor(processorFactories, scriptService, type, (Map) config); } else if (config instanceof String && "script".equals(type)) { Map normalizedScript = new HashMap<>(1); normalizedScript.put(ScriptType.INLINE.getParseField().getPreferredName(), config); - return readProcessor(processorFactories, type, normalizedScript); + return readProcessor(processorFactories, scriptService, type, normalizedScript); } else { throw newConfigurationException(type, null, null, "property isn't a map, but of type [" + config.getClass().getName() + "]"); @@ -370,15 +381,17 @@ public static Processor readProcessor(Map 
processorFa } public static Processor readProcessor(Map processorFactories, + ScriptService scriptService, String type, Map config) throws Exception { String tag = ConfigurationUtils.readOptionalStringProperty(null, null, config, TAG_KEY); + Script conditionalScript = extractConditional(config); Processor.Factory factory = processorFactories.get(type); if (factory != null) { boolean ignoreFailure = ConfigurationUtils.readBooleanProperty(null, null, config, "ignore_failure", false); List> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, Pipeline.ON_FAILURE_KEY); - List onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorFactories); + List onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, scriptService, processorFactories); if (onFailureProcessorConfigs != null && onFailureProcessors.isEmpty()) { throw newConfigurationException(type, tag, Pipeline.ON_FAILURE_KEY, @@ -392,14 +405,42 @@ public static Processor readProcessor(Map processorFa type, Arrays.toString(config.keySet().toArray())); } if (onFailureProcessors.size() > 0 || ignoreFailure) { - return new CompoundProcessor(ignoreFailure, Collections.singletonList(processor), onFailureProcessors); - } else { - return processor; + processor = new CompoundProcessor(ignoreFailure, Collections.singletonList(processor), onFailureProcessors); } + if (conditionalScript != null) { + processor = new ConditionalProcessor(tag, conditionalScript, scriptService, processor); + } + return processor; } catch (Exception e) { throw newConfigurationException(type, tag, null, e); } } throw newConfigurationException(type, tag, null, "No processor type exists with name [" + type + "]"); } + + private static Script extractConditional(Map config) throws IOException { + Object scriptSource = config.remove("if"); + if (scriptSource != null) { + try (XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent) + .map(normalizeScript(scriptSource)); + InputStream stream = BytesReference.bytes(builder).streamInput(); + XContentParser parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, stream)) { + return Script.parse(parser); + } + } + return null; + } + + @SuppressWarnings("unchecked") + private static Map normalizeScript(Object scriptConfig) { + if (scriptConfig instanceof Map) { + return (Map) scriptConfig; + } else if (scriptConfig instanceof String) { + return Collections.singletonMap("source", scriptConfig); + } else { + throw newConfigurationException("conditional", null, "script", + "property isn't a map or string, but of type [" + scriptConfig.getClass().getName() + "]"); + } + } } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java index aad55e12cefff..e218168eeb7b5 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -19,6 +19,9 @@ package org.elasticsearch.ingest; +import java.util.Collections; +import java.util.IdentityHashMap; +import java.util.Set; import org.elasticsearch.common.Strings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -55,6 +58,9 @@ public final class IngestDocument { private final Map sourceAndMetadata; private final Map ingestMetadata; + // Contains all pipelines that have been executed for this document + private final Set 
executedPipelines = Collections.newSetFromMap(new IdentityHashMap<>()); + public IngestDocument(String index, String type, String id, String routing, Long version, VersionType versionType, Map source) { this.sourceAndMetadata = new HashMap<>(); @@ -632,6 +638,19 @@ private static Object deepCopy(Object value) { } } + /** + * Executes the given pipeline for this document unless the pipeline has already been executed + * for this document. + * @param pipeline Pipeline to execute + * @throws Exception On exception in pipeline execution + */ + public void executePipeline(Pipeline pipeline) throws Exception { + if (this.executedPipelines.add(pipeline) == false) { + throw new IllegalStateException("Recursive invocation of pipeline [" + pipeline.getId() + "] detected."); + } + pipeline.execute(this); + } + @Override public boolean equals(Object obj) { if (obj == this) { return true; } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 01bc402e43ba5..f0f5d76caaba8 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -22,14 +22,42 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.concurrent.ScheduledFuture; -import java.util.function.BiFunction; - -import org.elasticsearch.common.settings.Settings; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.DeletePipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateApplier; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.metrics.MeanMetric; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.env.Environment; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.index.VersionType; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.script.ScriptService; @@ -38,20 +66,42 @@ /** * Holder class for several ingest related services.
*/ -public class IngestService { +public class IngestService implements ClusterStateApplier { public static final String NOOP_PIPELINE_NAME = "_none"; - private final PipelineStore pipelineStore; - private final PipelineExecutionService pipelineExecutionService; + private final ClusterService clusterService; + private final ScriptService scriptService; + private final Map processorFactories; + // Ideally this should be in IngestMetadata class, but we don't have the processor factories around there. + // We know of all the processor factories when a node with all its plugins has been initialized. Also some + // processor factories rely on other node services. Custom metadata is statically registered when classes + // are loaded, so in the cluster state we just save the pipeline config and here we keep the actual pipelines around. + private volatile Map pipelines = new HashMap<>(); + private final ThreadPool threadPool; + private final StatsHolder totalStats = new StatsHolder(); + private volatile Map statsHolderPerPipeline = Collections.emptyMap(); - public IngestService(Settings settings, ThreadPool threadPool, + public IngestService(ClusterService clusterService, ThreadPool threadPool, Environment env, ScriptService scriptService, AnalysisRegistry analysisRegistry, List ingestPlugins) { - BiFunction> scheduler = - (delay, command) -> threadPool.schedule(TimeValue.timeValueMillis(delay), ThreadPool.Names.GENERIC, command); - Processor.Parameters parameters = new Processor.Parameters(env, scriptService, analysisRegistry, - threadPool.getThreadContext(), threadPool::relativeTimeInMillis, scheduler); + this.clusterService = clusterService; + this.scriptService = scriptService; + this.processorFactories = processorFactories( + ingestPlugins, + new Processor.Parameters( + env, scriptService, analysisRegistry, + threadPool.getThreadContext(), threadPool::relativeTimeInMillis, + (delay, command) -> threadPool.schedule( + TimeValue.timeValueMillis(delay), ThreadPool.Names.GENERIC, command + ), this + ) + ); + this.threadPool = threadPool; + } + + private static Map processorFactories(List ingestPlugins, + Processor.Parameters parameters) { Map processorFactories = new HashMap<>(); for (IngestPlugin ingestPlugin : ingestPlugins) { Map newProcessors = ingestPlugin.getProcessors(parameters); @@ -61,24 +111,393 @@ public IngestService(Settings settings, ThreadPool threadPool, } } } - this.pipelineStore = new PipelineStore(settings, Collections.unmodifiableMap(processorFactories)); - this.pipelineExecutionService = new PipelineExecutionService(pipelineStore, threadPool); + return Collections.unmodifiableMap(processorFactories); + } + + public ClusterService getClusterService() { + return clusterService; + } + + public ScriptService getScriptService() { + return scriptService; + } + + /** + * Deletes the pipeline specified by id in the request.
+ */ + public void delete(DeletePipelineRequest request, ActionListener listener) { + clusterService.submitStateUpdateTask("delete-pipeline-" + request.getId(), + new AckedClusterStateUpdateTask(request, listener) { + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) { + return innerDelete(request, currentState); + } + }); + } + + static ClusterState innerDelete(DeletePipelineRequest request, ClusterState currentState) { + IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); + if (currentIngestMetadata == null) { + return currentState; + } + Map pipelines = currentIngestMetadata.getPipelines(); + Set toRemove = new HashSet<>(); + for (String pipelineKey : pipelines.keySet()) { + if (Regex.simpleMatch(request.getId(), pipelineKey)) { + toRemove.add(pipelineKey); + } + } + if (toRemove.isEmpty() && Regex.isMatchAllPattern(request.getId()) == false) { + throw new ResourceNotFoundException("pipeline [{}] is missing", request.getId()); + } else if (toRemove.isEmpty()) { + return currentState; + } + final Map pipelinesCopy = new HashMap<>(pipelines); + for (String key : toRemove) { + pipelinesCopy.remove(key); + } + ClusterState.Builder newState = ClusterState.builder(currentState); + newState.metaData(MetaData.builder(currentState.getMetaData()) + .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelinesCopy)) + .build()); + return newState.build(); + } + + /** + * @return pipeline configuration specified by id. If multiple ids or wildcards are specified multiple pipelines + * may be returned + */ + // Returning PipelineConfiguration instead of Pipeline, because Pipeline and Processor interface don't + // know how to serialize themselves. + public static List getPipelines(ClusterState clusterState, String... ids) { + IngestMetadata ingestMetadata = clusterState.getMetaData().custom(IngestMetadata.TYPE); + return innerGetPipelines(ingestMetadata, ids); + } + + static List innerGetPipelines(IngestMetadata ingestMetadata, String... ids) { + if (ingestMetadata == null) { + return Collections.emptyList(); + } + + // if we didn't ask for _any_ ID, then we get them all (this is the same as if they ask for '*') + if (ids.length == 0) { + return new ArrayList<>(ingestMetadata.getPipelines().values()); + } + + List result = new ArrayList<>(ids.length); + for (String id : ids) { + if (Regex.isSimpleMatchPattern(id)) { + for (Map.Entry entry : ingestMetadata.getPipelines().entrySet()) { + if (Regex.simpleMatch(id, entry.getKey())) { + result.add(entry.getValue()); + } + } + } else { + PipelineConfiguration pipeline = ingestMetadata.getPipelines().get(id); + if (pipeline != null) { + result.add(pipeline); + } + } + } + return result; } - public PipelineStore getPipelineStore() { - return pipelineStore; + /** + * Stores the specified pipeline definition in the request. 
+ */ + public void putPipeline(Map ingestInfos, PutPipelineRequest request, + ActionListener listener) throws Exception { + // validates the pipeline and processor configuration before submitting a cluster update task: + validatePipeline(ingestInfos, request); + clusterService.submitStateUpdateTask("put-pipeline-" + request.getId(), + new AckedClusterStateUpdateTask(request, listener) { + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) { + return innerPut(request, currentState); + } + }); + } + + /** + * Returns the pipeline by the specified id + */ + public Pipeline getPipeline(String id) { + return pipelines.get(id); } - public PipelineExecutionService getPipelineExecutionService() { - return pipelineExecutionService; + public Map getProcessorFactories() { + return processorFactories; } public IngestInfo info() { - Map processorFactories = pipelineStore.getProcessorFactories(); + Map processorFactories = getProcessorFactories(); List processorInfoList = new ArrayList<>(processorFactories.size()); for (Map.Entry entry : processorFactories.entrySet()) { processorInfoList.add(new ProcessorInfo(entry.getKey())); } return new IngestInfo(processorInfoList); } + + Map pipelines() { + return pipelines; + } + + @Override + public void applyClusterState(final ClusterChangedEvent event) { + ClusterState state = event.state(); + innerUpdatePipelines(event.previousState(), state); + IngestMetadata ingestMetadata = state.getMetaData().custom(IngestMetadata.TYPE); + if (ingestMetadata != null) { + updatePipelineStats(ingestMetadata); + } + } + + private static Pipeline substitutePipeline(String id, ElasticsearchParseException e) { + String tag = e.getHeaderKeys().contains("processor_tag") ? e.getHeader("processor_tag").get(0) : null; + String type = e.getHeaderKeys().contains("processor_type") ? 
e.getHeader("processor_type").get(0) : "unknown"; + String errorMessage = "pipeline with id [" + id + "] could not be loaded, caused by [" + e.getDetailedMessage() + "]"; + Processor failureProcessor = new AbstractProcessor(tag) { + @Override + public void execute(IngestDocument ingestDocument) { + throw new IllegalStateException(errorMessage); + } + + @Override + public String getType() { + return type; + } + }; + String description = "this is a place holder pipeline, because pipeline with id [" + id + "] could not be loaded"; + return new Pipeline(id, description, null, new CompoundProcessor(failureProcessor)); + } + + static ClusterState innerPut(PutPipelineRequest request, ClusterState currentState) { + IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); + Map pipelines; + if (currentIngestMetadata != null) { + pipelines = new HashMap<>(currentIngestMetadata.getPipelines()); + } else { + pipelines = new HashMap<>(); + } + + pipelines.put(request.getId(), new PipelineConfiguration(request.getId(), request.getSource(), request.getXContentType())); + ClusterState.Builder newState = ClusterState.builder(currentState); + newState.metaData(MetaData.builder(currentState.getMetaData()) + .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelines)) + .build()); + return newState.build(); + } + + void validatePipeline(Map ingestInfos, PutPipelineRequest request) throws Exception { + if (ingestInfos.isEmpty()) { + throw new IllegalStateException("Ingest info is empty"); + } + + Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); + Pipeline pipeline = Pipeline.create(request.getId(), pipelineConfig, processorFactories, scriptService); + List exceptions = new ArrayList<>(); + for (Processor processor : pipeline.flattenAllProcessors()) { + for (Map.Entry entry : ingestInfos.entrySet()) { + String type = processor.getType(); + if (entry.getValue().containsProcessor(type) == false && ConditionalProcessor.TYPE.equals(type) == false) { + String message = "Processor type [" + processor.getType() + "] is not installed on node [" + entry.getKey() + "]"; + exceptions.add( + ConfigurationUtils.newConfigurationException(processor.getType(), processor.getTag(), null, message) + ); + } + } + } + ExceptionsHelper.rethrowAndSuppress(exceptions); + } + + public void executeBulkRequest(Iterable> actionRequests, + BiConsumer itemFailureHandler, Consumer completionHandler) { + threadPool.executor(ThreadPool.Names.WRITE).execute(new AbstractRunnable() { + + @Override + public void onFailure(Exception e) { + completionHandler.accept(e); + } + + @Override + protected void doRun() { + for (DocWriteRequest actionRequest : actionRequests) { + IndexRequest indexRequest = null; + if (actionRequest instanceof IndexRequest) { + indexRequest = (IndexRequest) actionRequest; + } else if (actionRequest instanceof UpdateRequest) { + UpdateRequest updateRequest = (UpdateRequest) actionRequest; + indexRequest = updateRequest.docAsUpsert() ? 
updateRequest.doc() : updateRequest.upsertRequest(); + } + if (indexRequest == null) { + continue; + } + String pipelineId = indexRequest.getPipeline(); + if (NOOP_PIPELINE_NAME.equals(pipelineId) == false) { + try { + Pipeline pipeline = pipelines.get(pipelineId); + if (pipeline == null) { + throw new IllegalArgumentException("pipeline with id [" + pipelineId + "] does not exist"); + } + innerExecute(indexRequest, pipeline); + //this shouldn't be needed here but we do it for consistency with index api + // which requires it to prevent double execution + indexRequest.setPipeline(NOOP_PIPELINE_NAME); + } catch (Exception e) { + itemFailureHandler.accept(indexRequest, e); + } + } + } + completionHandler.accept(null); + } + }); + } + + public IngestStats stats() { + Map statsHolderPerPipeline = this.statsHolderPerPipeline; + + Map statsPerPipeline = new HashMap<>(statsHolderPerPipeline.size()); + for (Map.Entry entry : statsHolderPerPipeline.entrySet()) { + statsPerPipeline.put(entry.getKey(), entry.getValue().createStats()); + } + + return new IngestStats(totalStats.createStats(), statsPerPipeline); + } + + void updatePipelineStats(IngestMetadata ingestMetadata) { + boolean changed = false; + Map newStatsPerPipeline = new HashMap<>(statsHolderPerPipeline); + Iterator iterator = newStatsPerPipeline.keySet().iterator(); + while (iterator.hasNext()) { + String pipeline = iterator.next(); + if (ingestMetadata.getPipelines().containsKey(pipeline) == false) { + iterator.remove(); + changed = true; + } + } + for (String pipeline : ingestMetadata.getPipelines().keySet()) { + if (newStatsPerPipeline.containsKey(pipeline) == false) { + newStatsPerPipeline.put(pipeline, new StatsHolder()); + changed = true; + } + } + + if (changed) { + statsHolderPerPipeline = Collections.unmodifiableMap(newStatsPerPipeline); + } + } + + private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception { + if (pipeline.getProcessors().isEmpty()) { + return; + } + + long startTimeInNanos = System.nanoTime(); + // the pipeline specific stat holder may not exist and that is fine: + // (e.g. the pipeline may have been removed while we're ingesting a document + Optional pipelineStats = Optional.ofNullable(statsHolderPerPipeline.get(pipeline.getId())); + try { + totalStats.preIngest(); + pipelineStats.ifPresent(StatsHolder::preIngest); + String index = indexRequest.index(); + String type = indexRequest.type(); + String id = indexRequest.id(); + String routing = indexRequest.routing(); + Long version = indexRequest.version(); + VersionType versionType = indexRequest.versionType(); + Map sourceAsMap = indexRequest.sourceAsMap(); + IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, version, versionType, sourceAsMap); + pipeline.execute(ingestDocument); + + Map metadataMap = ingestDocument.extractMetadata(); + //it's fine to set all metadata fields all the time, as ingest document holds their starting values + //before ingestion, which might also get modified during ingestion. 
+ indexRequest.index((String) metadataMap.get(IngestDocument.MetaData.INDEX)); + indexRequest.type((String) metadataMap.get(IngestDocument.MetaData.TYPE)); + indexRequest.id((String) metadataMap.get(IngestDocument.MetaData.ID)); + indexRequest.routing((String) metadataMap.get(IngestDocument.MetaData.ROUTING)); + indexRequest.version(((Number) metadataMap.get(IngestDocument.MetaData.VERSION)).longValue()); + if (metadataMap.get(IngestDocument.MetaData.VERSION_TYPE) != null) { + indexRequest.versionType(VersionType.fromString((String) metadataMap.get(IngestDocument.MetaData.VERSION_TYPE))); + } + indexRequest.source(ingestDocument.getSourceAndMetadata()); + } catch (Exception e) { + totalStats.ingestFailed(); + pipelineStats.ifPresent(StatsHolder::ingestFailed); + throw e; + } finally { + long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos); + totalStats.postIngest(ingestTimeInMillis); + pipelineStats.ifPresent(statsHolder -> statsHolder.postIngest(ingestTimeInMillis)); + } + } + + private void innerUpdatePipelines(ClusterState previousState, ClusterState state) { + if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + return; + } + + IngestMetadata ingestMetadata = state.getMetaData().custom(IngestMetadata.TYPE); + IngestMetadata previousIngestMetadata = previousState.getMetaData().custom(IngestMetadata.TYPE); + if (Objects.equals(ingestMetadata, previousIngestMetadata)) { + return; + } + + Map pipelines = new HashMap<>(); + List exceptions = new ArrayList<>(); + for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) { + try { + pipelines.put( + pipeline.getId(), + Pipeline.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactories, scriptService) + ); + } catch (ElasticsearchParseException e) { + pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), e)); + exceptions.add(e); + } catch (Exception e) { + ElasticsearchParseException parseException = new ElasticsearchParseException( + "Error updating pipeline with id [" + pipeline.getId() + "]", e); + pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), parseException)); + exceptions.add(parseException); + } + } + this.pipelines = Collections.unmodifiableMap(pipelines); + ExceptionsHelper.rethrowAndSuppress(exceptions); + } + + private static class StatsHolder { + + private final MeanMetric ingestMetric = new MeanMetric(); + private final CounterMetric ingestCurrent = new CounterMetric(); + private final CounterMetric ingestFailed = new CounterMetric(); + + void preIngest() { + ingestCurrent.inc(); + } + + void postIngest(long ingestTimeInMillis) { + ingestCurrent.dec(); + ingestMetric.inc(ingestTimeInMillis); + } + + void ingestFailed() { + ingestFailed.inc(); + } + + IngestStats.Stats createStats() { + return new IngestStats.Stats(ingestMetric.count(), ingestMetric.sum(), ingestCurrent.count(), ingestFailed.count()); + } + } } diff --git a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java index 1b0553a54902b..0a8f9fbc0d894 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java +++ b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java @@ -26,6 +26,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import org.elasticsearch.script.ScriptService; /** * A pipeline is a list of {@link Processor} instances grouped under a unique id. 
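As a reading aid for the Pipeline.create hunk below and the ConfigurationUtils changes above: the new per-processor "if" option is removed from the processor's config map before the processor factory runs, and it may be given either as an inline script string or as a full script map. A dependency-free sketch of that normalization step follows (hypothetical class and method names; the real code goes on to parse the resulting map into a Script via XContent and wraps the processor in a ConditionalProcessor):

import java.util.Collections;
import java.util.Map;

// Illustrative only: mirrors how extractConditional/normalizeScript treat the "if" key.
final class ConditionalConfigSketch {
    @SuppressWarnings("unchecked")
    static Map<String, Object> extractIf(Map<String, Object> processorConfig) {
        Object scriptSource = processorConfig.remove("if"); // the factory never sees the "if" key
        if (scriptSource == null) {
            return null; // no condition: the processor runs unconditionally
        }
        if (scriptSource instanceof Map) {
            return (Map<String, Object>) scriptSource; // already a full script definition
        } else if (scriptSource instanceof String) {
            return Collections.singletonMap("source", scriptSource); // shorthand inline source
        }
        throw new IllegalArgumentException(
            "property isn't a map or string, but of type [" + scriptSource.getClass().getName() + "]");
    }
}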
@@ -51,6 +52,28 @@ public Pipeline(String id, @Nullable String description, @Nullable Integer versi this.version = version; } + public static Pipeline create(String id, Map config, + Map processorFactories, ScriptService scriptService) throws Exception { + String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY); + Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null); + List> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY); + List processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, scriptService, processorFactories); + List> onFailureProcessorConfigs = + ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY); + List onFailureProcessors = + ConfigurationUtils.readProcessorConfigs(onFailureProcessorConfigs, scriptService, processorFactories); + if (config.isEmpty() == false) { + throw new ElasticsearchParseException("pipeline [" + id + + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray())); + } + if (onFailureProcessorConfigs != null && onFailureProcessors.isEmpty()) { + throw new ElasticsearchParseException("pipeline [" + id + "] cannot have an empty on_failure option defined"); + } + CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.unmodifiableList(processors), + Collections.unmodifiableList(onFailureProcessors)); + return new Pipeline(id, description, version, compoundProcessor); + } + /** * Modifies the data of a document to be indexed based on the processor this pipeline holds */ @@ -113,27 +136,4 @@ public List flattenAllProcessors() { return compoundProcessor.flattenProcessors(); } - public static final class Factory { - - public Pipeline create(String id, Map config, Map processorFactories) throws Exception { - String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY); - Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null); - List> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY); - List processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, processorFactories); - List> onFailureProcessorConfigs = - ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY); - List onFailureProcessors = ConfigurationUtils.readProcessorConfigs(onFailureProcessorConfigs, processorFactories); - if (config.isEmpty() == false) { - throw new ElasticsearchParseException("pipeline [" + id + - "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray())); - } - if (onFailureProcessorConfigs != null && onFailureProcessors.isEmpty()) { - throw new ElasticsearchParseException("pipeline [" + id + "] cannot have an empty on_failure option defined"); - } - CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.unmodifiableList(processors), - Collections.unmodifiableList(onFailureProcessors)); - return new Pipeline(id, description, version, compoundProcessor); - } - - } } diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java index a2aa8e385e3f9..6778f3d1eaa6a 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java @@ -19,7 +19,6 @@ package 
org.elasticsearch.ingest; -import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; import org.elasticsearch.common.ParseField; @@ -117,13 +116,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } public static PipelineConfiguration readFrom(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - return new PipelineConfiguration(in.readString(), in.readBytesReference(), in.readEnum(XContentType.class)); - } else { - final String id = in.readString(); - final BytesReference config = in.readBytesReference(); - return new PipelineConfiguration(id, config, XContentHelper.xContentType(config)); - } + return new PipelineConfiguration(in.readString(), in.readBytesReference(), in.readEnum(XContentType.class)); } public static Diff readDiffFrom(StreamInput in) throws IOException { @@ -134,9 +127,7 @@ public static Diff readDiffFrom(StreamInput in) throws IO public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeBytesReference(config); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); } @Override diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java deleted file mode 100644 index 56d44ee888122..0000000000000 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.ingest; - -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterStateApplier; -import org.elasticsearch.common.metrics.CounterMetric; -import org.elasticsearch.common.metrics.MeanMetric; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.index.VersionType; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import java.util.function.BiConsumer; -import java.util.function.Consumer; - -public class PipelineExecutionService implements ClusterStateApplier { - - private final PipelineStore store; - private final ThreadPool threadPool; - - private final StatsHolder totalStats = new StatsHolder(); - private volatile Map statsHolderPerPipeline = Collections.emptyMap(); - - public PipelineExecutionService(PipelineStore store, ThreadPool threadPool) { - this.store = store; - this.threadPool = threadPool; - } - - public void executeBulkRequest(Iterable> actionRequests, - BiConsumer itemFailureHandler, - Consumer completionHandler) { - threadPool.executor(ThreadPool.Names.WRITE).execute(new AbstractRunnable() { - - @Override - public void onFailure(Exception e) { - completionHandler.accept(e); - } - - @Override - protected void doRun() throws Exception { - for (DocWriteRequest actionRequest : actionRequests) { - IndexRequest indexRequest = null; - if (actionRequest instanceof IndexRequest) { - indexRequest = (IndexRequest) actionRequest; - } else if (actionRequest instanceof UpdateRequest) { - UpdateRequest updateRequest = (UpdateRequest) actionRequest; - indexRequest = updateRequest.docAsUpsert() ? 
updateRequest.doc() : updateRequest.upsertRequest(); - } - if (indexRequest == null) { - continue; - } - String pipeline = indexRequest.getPipeline(); - if (IngestService.NOOP_PIPELINE_NAME.equals(pipeline) == false) { - try { - innerExecute(indexRequest, getPipeline(indexRequest.getPipeline())); - //this shouldn't be needed here but we do it for consistency with index api - // which requires it to prevent double execution - indexRequest.setPipeline(IngestService.NOOP_PIPELINE_NAME); - } catch (Exception e) { - itemFailureHandler.accept(indexRequest, e); - } - } - } - completionHandler.accept(null); - } - }); - } - - public IngestStats stats() { - Map statsHolderPerPipeline = this.statsHolderPerPipeline; - - Map statsPerPipeline = new HashMap<>(statsHolderPerPipeline.size()); - for (Map.Entry entry : statsHolderPerPipeline.entrySet()) { - statsPerPipeline.put(entry.getKey(), entry.getValue().createStats()); - } - - return new IngestStats(totalStats.createStats(), statsPerPipeline); - } - - @Override - public void applyClusterState(ClusterChangedEvent event) { - IngestMetadata ingestMetadata = event.state().getMetaData().custom(IngestMetadata.TYPE); - if (ingestMetadata != null) { - updatePipelineStats(ingestMetadata); - } - } - - void updatePipelineStats(IngestMetadata ingestMetadata) { - boolean changed = false; - Map newStatsPerPipeline = new HashMap<>(statsHolderPerPipeline); - Iterator iterator = newStatsPerPipeline.keySet().iterator(); - while (iterator.hasNext()) { - String pipeline = iterator.next(); - if (ingestMetadata.getPipelines().containsKey(pipeline) == false) { - iterator.remove(); - changed = true; - } - } - for (String pipeline : ingestMetadata.getPipelines().keySet()) { - if (newStatsPerPipeline.containsKey(pipeline) == false) { - newStatsPerPipeline.put(pipeline, new StatsHolder()); - changed = true; - } - } - - if (changed) { - statsHolderPerPipeline = Collections.unmodifiableMap(newStatsPerPipeline); - } - } - - private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception { - if (pipeline.getProcessors().isEmpty()) { - return; - } - - long startTimeInNanos = System.nanoTime(); - // the pipeline specific stat holder may not exist and that is fine: - // (e.g. the pipeline may have been removed while we're ingesting a document - Optional pipelineStats = Optional.ofNullable(statsHolderPerPipeline.get(pipeline.getId())); - try { - totalStats.preIngest(); - pipelineStats.ifPresent(StatsHolder::preIngest); - String index = indexRequest.index(); - String type = indexRequest.type(); - String id = indexRequest.id(); - String routing = indexRequest.routing(); - Long version = indexRequest.version(); - VersionType versionType = indexRequest.versionType(); - Map sourceAsMap = indexRequest.sourceAsMap(); - IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, version, versionType, sourceAsMap); - pipeline.execute(ingestDocument); - - Map metadataMap = ingestDocument.extractMetadata(); - //it's fine to set all metadata fields all the time, as ingest document holds their starting values - //before ingestion, which might also get modified during ingestion. 
- indexRequest.index((String) metadataMap.get(IngestDocument.MetaData.INDEX)); - indexRequest.type((String) metadataMap.get(IngestDocument.MetaData.TYPE)); - indexRequest.id((String) metadataMap.get(IngestDocument.MetaData.ID)); - indexRequest.routing((String) metadataMap.get(IngestDocument.MetaData.ROUTING)); - indexRequest.version(((Number) metadataMap.get(IngestDocument.MetaData.VERSION)).longValue()); - if (metadataMap.get(IngestDocument.MetaData.VERSION_TYPE) != null) { - indexRequest.versionType(VersionType.fromString((String) metadataMap.get(IngestDocument.MetaData.VERSION_TYPE))); - } - indexRequest.source(ingestDocument.getSourceAndMetadata()); - } catch (Exception e) { - totalStats.ingestFailed(); - pipelineStats.ifPresent(StatsHolder::ingestFailed); - throw e; - } finally { - long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos); - totalStats.postIngest(ingestTimeInMillis); - pipelineStats.ifPresent(statsHolder -> statsHolder.postIngest(ingestTimeInMillis)); - } - } - - private Pipeline getPipeline(String pipelineId) { - Pipeline pipeline = store.get(pipelineId); - if (pipeline == null) { - throw new IllegalArgumentException("pipeline with id [" + pipelineId + "] does not exist"); - } - return pipeline; - } - - static class StatsHolder { - - private final MeanMetric ingestMetric = new MeanMetric(); - private final CounterMetric ingestCurrent = new CounterMetric(); - private final CounterMetric ingestFailed = new CounterMetric(); - - void preIngest() { - ingestCurrent.inc(); - } - - void postIngest(long ingestTimeInMillis) { - ingestCurrent.dec(); - ingestMetric.inc(ingestTimeInMillis); - } - - void ingestFailed() { - ingestFailed.inc(); - } - - IngestStats.Stats createStats() { - return new IngestStats.Stats(ingestMetric.count(), ingestMetric.sum(), ingestCurrent.count(), ingestFailed.count()); - } - - } - -} diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java b/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java deleted file mode 100644 index 9fceaf1a9a573..0000000000000 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.ingest; - -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateApplier; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.gateway.GatewayService; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; - -public class PipelineStore extends AbstractComponent implements ClusterStateApplier { - - private final Pipeline.Factory factory = new Pipeline.Factory(); - private final Map processorFactories; - - // Ideally this should be in IngestMetadata class, but we don't have the processor factories around there. - // We know of all the processor factories when a node with all its plugin have been initialized. Also some - // processor factories rely on other node services. Custom metadata is statically registered when classes - // are loaded, so in the cluster state we just save the pipeline config and here we keep the actual pipelines around. 
- volatile Map pipelines = new HashMap<>(); - - public PipelineStore(Settings settings, Map processorFactories) { - super(settings); - this.processorFactories = processorFactories; - } - - @Override - public void applyClusterState(ClusterChangedEvent event) { - innerUpdatePipelines(event.previousState(), event.state()); - } - - void innerUpdatePipelines(ClusterState previousState, ClusterState state) { - if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - return; - } - - IngestMetadata ingestMetadata = state.getMetaData().custom(IngestMetadata.TYPE); - IngestMetadata previousIngestMetadata = previousState.getMetaData().custom(IngestMetadata.TYPE); - if (Objects.equals(ingestMetadata, previousIngestMetadata)) { - return; - } - - Map pipelines = new HashMap<>(); - List exceptions = new ArrayList<>(); - for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) { - try { - pipelines.put(pipeline.getId(), factory.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactories)); - } catch (ElasticsearchParseException e) { - pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), e)); - exceptions.add(e); - } catch (Exception e) { - ElasticsearchParseException parseException = new ElasticsearchParseException( - "Error updating pipeline with id [" + pipeline.getId() + "]", e); - pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), parseException)); - exceptions.add(parseException); - } - } - this.pipelines = Collections.unmodifiableMap(pipelines); - ExceptionsHelper.rethrowAndSuppress(exceptions); - } - - private Pipeline substitutePipeline(String id, ElasticsearchParseException e) { - String tag = e.getHeaderKeys().contains("processor_tag") ? e.getHeader("processor_tag").get(0) : null; - String type = e.getHeaderKeys().contains("processor_type") ? e.getHeader("processor_type").get(0) : "unknown"; - String errorMessage = "pipeline with id [" + id + "] could not be loaded, caused by [" + e.getDetailedMessage() + "]"; - Processor failureProcessor = new AbstractProcessor(tag) { - @Override - public void execute(IngestDocument ingestDocument) { - throw new IllegalStateException(errorMessage); - } - - @Override - public String getType() { - return type; - } - }; - String description = "this is a place holder pipeline, because pipeline with id [" + id + "] could not be loaded"; - return new Pipeline(id, description, null, new CompoundProcessor(failureProcessor)); - } - - /** - * Deletes the pipeline specified by id in the request. 
- */ - public void delete(ClusterService clusterService, DeletePipelineRequest request, ActionListener listener) { - clusterService.submitStateUpdateTask("delete-pipeline-" + request.getId(), - new AckedClusterStateUpdateTask(request, listener) { - - @Override - protected AcknowledgedResponse newResponse(boolean acknowledged) { - return new AcknowledgedResponse(acknowledged); - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - return innerDelete(request, currentState); - } - }); - } - - ClusterState innerDelete(DeletePipelineRequest request, ClusterState currentState) { - IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); - if (currentIngestMetadata == null) { - return currentState; - } - Map pipelines = currentIngestMetadata.getPipelines(); - Set toRemove = new HashSet<>(); - for (String pipelineKey : pipelines.keySet()) { - if (Regex.simpleMatch(request.getId(), pipelineKey)) { - toRemove.add(pipelineKey); - } - } - if (toRemove.isEmpty() && Regex.isMatchAllPattern(request.getId()) == false) { - throw new ResourceNotFoundException("pipeline [{}] is missing", request.getId()); - } else if (toRemove.isEmpty()) { - return currentState; - } - final Map pipelinesCopy = new HashMap<>(pipelines); - for (String key : toRemove) { - pipelinesCopy.remove(key); - } - ClusterState.Builder newState = ClusterState.builder(currentState); - newState.metaData(MetaData.builder(currentState.getMetaData()) - .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelinesCopy)) - .build()); - return newState.build(); - } - - /** - * Stores the specified pipeline definition in the request. - */ - public void put(ClusterService clusterService, Map ingestInfos, PutPipelineRequest request, - ActionListener listener) throws Exception { - // validates the pipeline and processor configuration before submitting a cluster update task: - validatePipeline(ingestInfos, request); - clusterService.submitStateUpdateTask("put-pipeline-" + request.getId(), - new AckedClusterStateUpdateTask(request, listener) { - - @Override - protected AcknowledgedResponse newResponse(boolean acknowledged) { - return new AcknowledgedResponse(acknowledged); - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - return innerPut(request, currentState); - } - }); - } - - void validatePipeline(Map ingestInfos, PutPipelineRequest request) throws Exception { - if (ingestInfos.isEmpty()) { - throw new IllegalStateException("Ingest info is empty"); - } - - Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); - Pipeline pipeline = factory.create(request.getId(), pipelineConfig, processorFactories); - List exceptions = new ArrayList<>(); - for (Processor processor : pipeline.flattenAllProcessors()) { - for (Map.Entry entry : ingestInfos.entrySet()) { - if (entry.getValue().containsProcessor(processor.getType()) == false) { - String message = "Processor type [" + processor.getType() + "] is not installed on node [" + entry.getKey() + "]"; - exceptions.add(ConfigurationUtils.newConfigurationException(processor.getType(), processor.getTag(), null, message)); - } - } - } - ExceptionsHelper.rethrowAndSuppress(exceptions); - } - - ClusterState innerPut(PutPipelineRequest request, ClusterState currentState) { - IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); - Map pipelines; - if (currentIngestMetadata != null) { - pipelines = new 
HashMap<>(currentIngestMetadata.getPipelines()); - } else { - pipelines = new HashMap<>(); - } - - pipelines.put(request.getId(), new PipelineConfiguration(request.getId(), request.getSource(), request.getXContentType())); - ClusterState.Builder newState = ClusterState.builder(currentState); - newState.metaData(MetaData.builder(currentState.getMetaData()) - .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelines)) - .build()); - return newState.build(); - } - - /** - * Returns the pipeline by the specified id - */ - public Pipeline get(String id) { - return pipelines.get(id); - } - - public Map getProcessorFactories() { - return processorFactories; - } - - /** - * @return pipeline configuration specified by id. If multiple ids or wildcards are specified multiple pipelines - * may be returned - */ - // Returning PipelineConfiguration instead of Pipeline, because Pipeline and Processor interface don't - // know how to serialize themselves. - public List getPipelines(ClusterState clusterState, String... ids) { - IngestMetadata ingestMetadata = clusterState.getMetaData().custom(IngestMetadata.TYPE); - return innerGetPipelines(ingestMetadata, ids); - } - - List innerGetPipelines(IngestMetadata ingestMetadata, String... ids) { - if (ingestMetadata == null) { - return Collections.emptyList(); - } - - // if we didn't ask for _any_ ID, then we get them all (this is the same as if they ask for '*') - if (ids.length == 0) { - return new ArrayList<>(ingestMetadata.getPipelines().values()); - } - - List result = new ArrayList<>(ids.length); - for (String id : ids) { - if (Regex.isSimpleMatchPattern(id)) { - for (Map.Entry entry : ingestMetadata.getPipelines().entrySet()) { - if (Regex.simpleMatch(id, entry.getKey())) { - result.add(entry.getValue()); - } - } - } else { - PipelineConfiguration pipeline = ingestMetadata.getPipelines().get(id); - if (pipeline != null) { - result.add(pipeline); - } - } - } - return result; - } -} diff --git a/server/src/main/java/org/elasticsearch/ingest/Processor.java b/server/src/main/java/org/elasticsearch/ingest/Processor.java index c318d478814de..15a26d3749191 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Processor.java +++ b/server/src/main/java/org/elasticsearch/ingest/Processor.java @@ -97,22 +97,26 @@ class Parameters { * instances that have run prior to in ingest. 
      */
     public final ThreadContext threadContext;
-
+
     public final LongSupplier relativeTimeSupplier;
-
+
+    public final IngestService ingestService;
+
     /**
      * Provides scheduler support
      */
     public final BiFunction<Long, Runnable, ScheduledFuture<?>> scheduler;
 
     public Parameters(Environment env, ScriptService scriptService, AnalysisRegistry analysisRegistry, ThreadContext threadContext,
-                      LongSupplier relativeTimeSupplier, BiFunction<Long, Runnable, ScheduledFuture<?>> scheduler) {
+                      LongSupplier relativeTimeSupplier, BiFunction<Long, Runnable, ScheduledFuture<?>> scheduler,
+                      IngestService ingestService) {
         this.env = env;
         this.scriptService = scriptService;
         this.threadContext = threadContext;
         this.analysisRegistry = analysisRegistry;
         this.relativeTimeSupplier = relativeTimeSupplier;
         this.scheduler = scheduler;
+        this.ingestService = ingestService;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java
index 637f4cf1cbe00..3bdfe95f1e2c6 100644
--- a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java
+++ b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java
@@ -52,11 +52,7 @@ public OsStats(StreamInput in) throws IOException {
         this.cpu = new Cpu(in);
         this.mem = new Mem(in);
         this.swap = new Swap(in);
-        if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
-            this.cgroup = in.readOptionalWriteable(Cgroup::new);
-        } else {
-            this.cgroup = null;
-        }
+        this.cgroup = in.readOptionalWriteable(Cgroup::new);
     }
 
     @Override
@@ -65,9 +61,7 @@ public void writeTo(StreamOutput out) throws IOException {
         cpu.writeTo(out);
         mem.writeTo(out);
         swap.writeTo(out);
-        if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
-            out.writeOptionalWriteable(cgroup);
-        }
+        out.writeOptionalWriteable(cgroup);
     }
 
     public long getTimestamp() {
diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java
index 274157b427ad6..250ef8f2dd9e4 100644
--- a/server/src/main/java/org/elasticsearch/node/Node.java
+++ b/server/src/main/java/org/elasticsearch/node/Node.java
@@ -352,7 +352,7 @@ protected Node(final Environment environment, Collection
            final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool);
            clusterService.addStateApplier(scriptModule.getScriptService());
            resourcesToClose.add(clusterService);
-            final IngestService ingestService = new IngestService(settings, threadPool, this.environment,
+            final IngestService ingestService = new IngestService(clusterService, threadPool, this.environment,
                 scriptModule.getScriptService(), analysisModule.getAnalysisRegistry(), pluginsService.filterPlugins(IngestPlugin.class));
             final DiskThresholdMonitor listener = new DiskThresholdMonitor(settings, clusterService::state,
                 clusterService.getClusterSettings(), client);
@@ -470,7 +470,7 @@ protected Node(final Environment environment, Collection
            final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, namedWriteableRegistry,
                 networkService, clusterService.getMasterService(), clusterService.getClusterApplierService(),
                 clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class),
-                clusterModule.getAllocationService());
+                clusterModule.getAllocationService(), environment.configFile());
             this.nodeService = new NodeService(settings, threadPool, monitorService, discoveryModule.getDiscovery(),
                 transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(),
                 httpServerTransport, ingestService, clusterService,
settingsModule.getSettingsFilter(), responseCollectorService, diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 0e19b5a650221..207886c5cf263 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -37,7 +37,6 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.monitor.MonitorService; -import org.elasticsearch.node.ResponseCollectorService; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; @@ -83,8 +82,7 @@ public class NodeService extends AbstractComponent implements Closeable { this.scriptService = scriptService; this.responseCollectorService = responseCollectorService; this.searchTransportService = searchTransportService; - clusterService.addStateApplier(ingestService.getPipelineStore()); - clusterService.addStateApplier(ingestService.getPipelineExecutionService()); + clusterService.addStateApplier(ingestService); } public NodeInfo info(boolean settings, boolean os, boolean process, boolean jvm, boolean threadPool, @@ -120,7 +118,7 @@ public NodeStats stats(CommonStatsFlags indices, boolean os, boolean process, bo circuitBreaker ? circuitBreakerService.stats() : null, script ? scriptService.stats() : null, discoveryStats ? discovery.stats() : null, - ingest ? ingestService.getPipelineExecutionService().stats() : null, + ingest ? ingestService.stats() : null, adaptiveSelection ? responseCollectorService.getAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null ); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java index f81b7c770e56c..b7a179e41e381 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java @@ -188,7 +188,7 @@ public long getNumberOfTasksOnNode(String nodeId, String taskName) { @Override public Version getMinimalSupportedVersion() { - return Version.V_5_4_0; + return Version.CURRENT.minimumCompatibilityVersion(); } @Override diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java b/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java index 74a911b0ae4fc..d211efef5173e 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java @@ -107,11 +107,7 @@ public PluginInfo(final StreamInput in) throws IOException { } else { extendedPlugins = Collections.emptyList(); } - if (in.getVersion().onOrAfter(Version.V_5_4_0)) { - hasNativeController = in.readBoolean(); - } else { - hasNativeController = false; - } + hasNativeController = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_6_0_0_beta2) && in.getVersion().before(Version.V_6_3_0)) { /* * Elasticsearch versions in [6.0.0-beta2, 6.3.0) allowed plugins to specify that they require the keystore and this was @@ -134,9 +130,7 @@ public void writeTo(final StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_6_2_0)) { out.writeStringList(extendedPlugins); } - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeBoolean(hasNativeController); - } + 
out.writeBoolean(hasNativeController); if (out.getVersion().onOrAfter(Version.V_6_0_0_beta2) && out.getVersion().before(Version.V_6_3_0)) { /* * Elasticsearch versions in [6.0.0-beta2, 6.3.0) allowed plugins to specify that they require the keystore and this was diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index e113240bee3dc..9469f657c96bd 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -39,7 +39,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; -import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; @@ -719,7 +718,7 @@ public boolean isReadOnly() { protected void writeIndexGen(final RepositoryData repositoryData, final long repositoryStateId) throws IOException { assert isReadOnly() == false; // can not write to a read only repository final long currentGen = latestIndexBlobId(); - if (repositoryStateId != SnapshotsInProgress.UNDEFINED_REPOSITORY_STATE_ID && currentGen != repositoryStateId) { + if (currentGen != repositoryStateId) { // the index file was updated by a concurrent operation, so we were operating on stale // repository data throw new RepositoryException(metadata.name(), "concurrent modification of the index-N file, expected current generation [" + @@ -1493,7 +1492,7 @@ public void restore() throws IOException { // empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty // shard anyway, we just create the empty shard here and then exit. IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(null) - .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setOpenMode(IndexWriterConfig.OpenMode.CREATE) .setCommitOnClose(true)); writer.close(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestActions.java b/server/src/main/java/org/elasticsearch/rest/action/RestActions.java index 759cd4a773dd9..f25fd107e51db 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestActions.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestActions.java @@ -90,8 +90,7 @@ public static void buildBroadcastShardsHeader(XContentBuilder builder, Params pa builder.field(FAILED_FIELD.getPreferredName(), failed); if (shardFailures != null && shardFailures.length > 0) { builder.startArray(FAILURES_FIELD.getPreferredName()); - final boolean group = params.paramAsBoolean("group_shard_failures", true); // we group by default - for (ShardOperationFailedException shardFailure : group ? 
ExceptionsHelper.groupBy(shardFailures) : shardFailures) { + for (ShardOperationFailedException shardFailure : ExceptionsHelper.groupBy(shardFailures)) { builder.startObject(); shardFailure.toXContent(builder, params); builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index b452b62eb5e95..746bb643bf62d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -87,13 +87,19 @@ public boolean canTripCircuitBreaker() { private XContentBuilder renderResponse(ClusterState state, boolean renderDefaults, XContentBuilder builder, ToXContent.Params params) throws IOException { - return - new ClusterGetSettingsResponse( - state.metaData().persistentSettings(), - state.metaData().transientSettings(), - renderDefaults ? - settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), this.settings)) : - Settings.EMPTY - ).toXContent(builder, params); + return response(state, renderDefaults, settingsFilter, clusterSettings, settings).toXContent(builder, params); } + + static ClusterGetSettingsResponse response( + final ClusterState state, + final boolean renderDefaults, + final SettingsFilter settingsFilter, + final ClusterSettings clusterSettings, + final Settings settings) { + return new ClusterGetSettingsResponse( + settingsFilter.filter(state.metaData().persistentSettings()), + settingsFilter.filter(state.metaData().transientSettings()), + renderDefaults ? settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), settings)) : Settings.EMPTY); + } + } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java index 0697871ea5d1c..2251615d678fb 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java @@ -59,7 +59,6 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client .cluster() .prepareReloadSecureSettings() .setTimeout(request.param("timeout")) - .source(request.requiredContent(), request.getXContentType()) .setNodesIds(nodesIds); final NodesReloadSecureSettingsRequest nodesRequest = nodesRequestBuilder.request(); return channel -> nodesRequestBuilder @@ -68,12 +67,12 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XContentBuilder builder) throws Exception { builder.startObject(); - RestActions.buildNodesHeader(builder, channel.request(), response); - builder.field("cluster_name", response.getClusterName().value()); - response.toXContent(builder, channel.request()); + { + RestActions.buildNodesHeader(builder, channel.request(), response); + builder.field("cluster_name", response.getClusterName().value()); + response.toXContent(builder, channel.request()); + } builder.endObject(); - // clear password for the original request - nodesRequest.secureSettingsPassword().close(); return new BytesRestResponse(RestStatus.OK, builder); } }); diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 2a60262b32f57..6239015dae418 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -86,6 +86,14 @@ public static MultiSearchRequest parseRequest(RestRequest restRequest, boolean a int preFilterShardSize = restRequest.paramAsInt("pre_filter_shard_size", SearchRequest.DEFAULT_PRE_FILTER_SHARD_SIZE); + final Integer maxConcurrentShardRequests; + if (restRequest.hasParam("max_concurrent_shard_requests")) { + // only set if we have the parameter since we auto adjust the max concurrency on the coordinator + // based on the number of nodes in the cluster + maxConcurrentShardRequests = restRequest.paramAsInt("max_concurrent_shard_requests", Integer.MIN_VALUE); + } else { + maxConcurrentShardRequests = null; + } parseMultiLineRequest(restRequest, multiRequest.indicesOptions(), allowExplicitIndex, (searchRequest, parser) -> { searchRequest.source(SearchSourceBuilder.fromXContent(parser, false)); @@ -96,6 +104,9 @@ public static MultiSearchRequest parseRequest(RestRequest restRequest, boolean a for (SearchRequest request : requests) { // preserve if it's set on the request request.setPreFilterShardSize(Math.min(preFilterShardSize, request.getPreFilterShardSize())); + if (maxConcurrentShardRequests != null) { + request.setMaxConcurrentShardRequests(maxConcurrentShardRequests); + } } return multiRequest; } diff --git a/server/src/main/java/org/elasticsearch/script/IngestConditionalScript.java b/server/src/main/java/org/elasticsearch/script/IngestConditionalScript.java new file mode 100644 index 0000000000000..27ce29b95dc50 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/IngestConditionalScript.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.script; + +import java.util.Map; + +/** + * A script used by {@link org.elasticsearch.ingest.ConditionalProcessor}. + */ +public abstract class IngestConditionalScript { + + public static final String[] PARAMETERS = { "ctx" }; + + /** The context used to compile {@link IngestConditionalScript} factories. */ + public static final ScriptContext CONTEXT = new ScriptContext<>("processor_conditional", Factory.class); + + /** The generic runtime parameters for the script. */ + private final Map params; + + public IngestConditionalScript(Map params) { + this.params = params; + } + + /** Return the parameters for this script. 
*/ + public Map getParams() { + return params; + } + + public abstract boolean execute(Map ctx); + + public interface Factory { + IngestConditionalScript newInstance(Map params); + } +} diff --git a/server/src/main/java/org/elasticsearch/script/Script.java b/server/src/main/java/org/elasticsearch/script/Script.java index a64a3ecd37640..67ea4f24b83f8 100644 --- a/server/src/main/java/org/elasticsearch/script/Script.java +++ b/server/src/main/java/org/elasticsearch/script/Script.java @@ -19,7 +19,6 @@ package org.elasticsearch.script; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -451,133 +450,24 @@ public Script(ScriptType type, String lang, String idOrCode, Map * Creates a {@link Script} read from an input stream. */ public Script(StreamInput in) throws IOException { - // Version 5.3 allows lang to be an optional parameter for stored scripts and expects - // options to be null for stored and file scripts. - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - this.type = ScriptType.readFrom(in); - this.lang = in.readOptionalString(); - this.idOrCode = in.readString(); - @SuppressWarnings("unchecked") - Map options = (Map)(Map)in.readMap(); - this.options = options; - this.params = in.readMap(); - // Version 5.1 to 5.3 (exclusive) requires all Script members to be non-null and supports the potential - // for more options than just XContentType. Reorders the read in contents to be in - // same order as the constructor. - } else if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - this.type = ScriptType.readFrom(in); - String lang = in.readString(); - this.lang = this.type == ScriptType.STORED ? null : lang; - - this.idOrCode = in.readString(); - @SuppressWarnings("unchecked") - Map options = (Map)(Map)in.readMap(); - - if (this.type != ScriptType.INLINE && options.isEmpty()) { - this.options = null; - } else { - this.options = options; - } - - this.params = in.readMap(); - // Prior to version 5.1 the script members are read in certain cases as optional and given - // default values when necessary. Also the only option supported is for XContentType. - } else { - this.idOrCode = in.readString(); - - if (in.readBoolean()) { - this.type = ScriptType.readFrom(in); - } else { - this.type = DEFAULT_SCRIPT_TYPE; - } - - String lang = in.readOptionalString(); - - if (lang == null) { - this.lang = this.type == ScriptType.STORED ? null : DEFAULT_SCRIPT_LANG; - } else { - this.lang = lang; - } - - Map params = in.readMap(); - - if (params == null) { - this.params = new HashMap<>(); - } else { - this.params = params; - } - - if (in.readBoolean()) { - this.options = new HashMap<>(); - XContentType contentType = in.readEnum(XContentType.class); - this.options.put(CONTENT_TYPE_OPTION, contentType.mediaType()); - } else if (type == ScriptType.INLINE) { - options = new HashMap<>(); - } else { - this.options = null; - } - } + this.type = ScriptType.readFrom(in); + this.lang = in.readOptionalString(); + this.idOrCode = in.readString(); + @SuppressWarnings("unchecked") + Map options = (Map)(Map)in.readMap(); + this.options = options; + this.params = in.readMap(); } @Override public void writeTo(StreamOutput out) throws IOException { - // Version 5.3+ allows lang to be an optional parameter for stored scripts and expects - // options to be null for stored and file scripts. 
- if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - type.writeTo(out); - out.writeOptionalString(lang); - out.writeString(idOrCode); - @SuppressWarnings("unchecked") - Map options = (Map)(Map)this.options; - out.writeMap(options); - out.writeMap(params); - // Version 5.1 to 5.3 (exclusive) requires all Script members to be non-null and supports the potential - // for more options than just XContentType. Reorders the written out contents to be in - // same order as the constructor. - } else if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - type.writeTo(out); - - if (lang == null) { - out.writeString(""); - } else { - out.writeString(lang); - } - - out.writeString(idOrCode); - @SuppressWarnings("unchecked") - Map options = (Map)(Map)this.options; - - if (options == null) { - out.writeMap(new HashMap<>()); - } else { - out.writeMap(options); - } - - out.writeMap(params); - // Prior to version 5.1 the Script members were possibly written as optional or null, though there is no case where a null - // value wasn't equivalent to it's default value when actually compiling/executing a script. Meaning, there are no - // backwards compatibility issues, and now there's enforced consistency. Also the only supported compiler - // option was XContentType. - } else { - out.writeString(idOrCode); - out.writeBoolean(true); - type.writeTo(out); - out.writeOptionalString(lang); - - if (params.isEmpty()) { - out.writeMap(null); - } else { - out.writeMap(params); - } - - if (options != null && options.containsKey(CONTENT_TYPE_OPTION)) { - XContentType contentType = XContentType.fromMediaTypeOrFormat(options.get(CONTENT_TYPE_OPTION)); - out.writeBoolean(true); - out.writeEnum(contentType); - } else { - out.writeBoolean(false); - } - } + type.writeTo(out); + out.writeOptionalString(lang); + out.writeString(idOrCode); + @SuppressWarnings("unchecked") + Map options = (Map) (Map) this.options; + out.writeMap(options); + out.writeMap(params); } /** diff --git a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java index 59d824eb313e0..35a7c2e60d685 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java @@ -292,25 +292,7 @@ public ScriptMetaData(StreamInput in) throws IOException { for (int i = 0; i < size; i++) { String id = in.readString(); - - // Prior to version 5.3 all scripts were stored using the deprecated namespace. - // Split the id to find the language then use StoredScriptSource to parse the - // expected BytesReference after which a new StoredScriptSource is created - // with the appropriate language and options. - if (in.getVersion().before(Version.V_5_3_0)) { - int split = id.indexOf('#'); - - if (split == -1) { - throw new IllegalArgumentException("illegal stored script id [" + id + "], does not contain lang"); - } else { - source = new StoredScriptSource(in); - source = new StoredScriptSource(id.substring(0, split), source.getSource(), Collections.emptyMap()); - } - // Version 5.3+ can just be parsed normally using StoredScriptSource. 
- } else { - source = new StoredScriptSource(in); - } - + source = new StoredScriptSource(in); scripts.put(id, source); } @@ -319,34 +301,11 @@ public ScriptMetaData(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - // Version 5.3+ will output the contents of the scripts' Map using - // StoredScriptSource to stored the language, code, and options. - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeVInt(scripts.size()); - - for (Map.Entry entry : scripts.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } - // Prior to Version 5.3, stored scripts can only be read using the deprecated - // namespace. Scripts using the deprecated namespace are first isolated in a - // temporary Map, then written out. Since all scripts will be stored using the - // deprecated namespace, no scripts will be lost. - } else { - Map filtered = new HashMap<>(); - - for (Map.Entry entry : scripts.entrySet()) { - if (entry.getKey().contains("#")) { - filtered.put(entry.getKey(), entry.getValue()); - } - } - - out.writeVInt(filtered.size()); + out.writeVInt(scripts.size()); - for (Map.Entry entry : filtered.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } + for (Map.Entry entry : scripts.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptModule.java b/server/src/main/java/org/elasticsearch/script/ScriptModule.java index f04e690fa425c..1788d8c792bf0 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptModule.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptModule.java @@ -51,6 +51,7 @@ public class ScriptModule { BucketAggregationSelectorScript.CONTEXT, SignificantTermsHeuristicScoreScript.CONTEXT, IngestScript.CONTEXT, + IngestConditionalScript.CONTEXT, FilterScript.CONTEXT, SimilarityScript.CONTEXT, SimilarityWeightScript.CONTEXT, diff --git a/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java b/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java index 774dc95d39977..9f6ea999a9306 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java @@ -33,9 +33,9 @@ public class ScriptedMetricAggContexts { private abstract static class ParamsAndStateBase { private final Map params; - private final Object state; + private final Map state; - ParamsAndStateBase(Map params, Object state) { + ParamsAndStateBase(Map params, Map state) { this.params = params; this.state = state; } @@ -50,14 +50,14 @@ public Object getState() { } public abstract static class InitScript extends ParamsAndStateBase { - public InitScript(Map params, Object state) { + public InitScript(Map params, Map state) { super(params, state); } public abstract void execute(); public interface Factory { - InitScript newInstance(Map params, Object state); + InitScript newInstance(Map params, Map state); } public static String[] PARAMETERS = {}; @@ -68,7 +68,7 @@ public abstract static class MapScript extends ParamsAndStateBase { private final LeafSearchLookup leafLookup; private Scorer scorer; - public MapScript(Map params, Object state, SearchLookup lookup, LeafReaderContext leafContext) { + public MapScript(Map params, Map state, SearchLookup lookup, LeafReaderContext leafContext) { super(params, state); this.leafLookup = leafContext == null ? 
null : lookup.getLeafSearchLookup(leafContext); @@ -110,7 +110,7 @@ public interface LeafFactory { } public interface Factory { - LeafFactory newFactory(Map params, Object state, SearchLookup lookup); + LeafFactory newFactory(Map params, Map state, SearchLookup lookup); } public static String[] PARAMETERS = new String[] {}; @@ -118,14 +118,14 @@ public interface Factory { } public abstract static class CombineScript extends ParamsAndStateBase { - public CombineScript(Map params, Object state) { + public CombineScript(Map params, Map state) { super(params, state); } public abstract Object execute(); public interface Factory { - CombineScript newInstance(Map params, Object state); + CombineScript newInstance(Map params, Map state); } public static String[] PARAMETERS = {}; diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 4bf5e03b8a7cc..a7db2c55fe149 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -98,6 +98,8 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -789,14 +791,21 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc context.fetchSourceContext(source.fetchSource()); } if (source.docValueFields() != null) { + List docValueFields = new ArrayList<>(); + for (DocValueFieldsContext.FieldAndFormat format : source.docValueFields()) { + Collection fieldNames = context.mapperService().simpleMatchToFullName(format.field); + for (String fieldName: fieldNames) { + docValueFields.add(new DocValueFieldsContext.FieldAndFormat(fieldName, format.format)); + } + } int maxAllowedDocvalueFields = context.mapperService().getIndexSettings().getMaxDocvalueFields(); - if (source.docValueFields().size() > maxAllowedDocvalueFields) { + if (docValueFields.size() > maxAllowedDocvalueFields) { throw new IllegalArgumentException( - "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [" + maxAllowedDocvalueFields - + "] but was [" + source.docValueFields().size() + "]. This limit can be set by changing the [" - + IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey() + "] index level setting."); + "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [" + maxAllowedDocvalueFields + + "] but was [" + docValueFields.size() + "]. 
This limit can be set by changing the [" + + IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey() + "] index level setting."); } - context.docValueFieldsContext(new DocValueFieldsContext(source.docValueFields())); + context.docValueFieldsContext(new DocValueFieldsContext(docValueFields)); } if (source.highlighter() != null) { HighlightBuilder highlightBuilder = source.highlighter(); diff --git a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java index 19c0f8c64d58b..4a46c7202d14e 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java +++ b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -19,7 +19,6 @@ package org.elasticsearch.search; -import org.elasticsearch.Version; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; @@ -52,11 +51,7 @@ public SearchShardTarget(StreamInput in) throws IOException { } shardId = ShardId.readShardId(in); this.originalIndices = null; - if (in.getVersion().onOrAfter(Version.V_5_6_0)) { - clusterAlias = in.readOptionalString(); - } else { - clusterAlias = null; - } + clusterAlias = in.readOptionalString(); } public SearchShardTarget(String nodeId, ShardId shardId, String clusterAlias, OriginalIndices originalIndices) { @@ -121,9 +116,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeText(nodeId); } shardId.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_5_6_0)) { - out.writeOptionalString(clusterAlias); - } + out.writeOptionalString(clusterAlias); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketUtils.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketUtils.java index d145e32c45b3e..17b50fa9bef5f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketUtils.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketUtils.java @@ -31,27 +31,21 @@ private BucketUtils() {} * * @param finalSize * The number of terms required in the final reduce phase. - * @param numberOfShards - * The number of shards being queried. 
+ * @param singleShard + * whether a single shard is being queried, or multiple shards * @return A suggested default for the size of any shard-side PriorityQueues */ - public static int suggestShardSideQueueSize(int finalSize, int numberOfShards) { + public static int suggestShardSideQueueSize(int finalSize, boolean singleShard) { if (finalSize < 1) { throw new IllegalArgumentException("size must be positive, got " + finalSize); } - if (numberOfShards < 1) { - throw new IllegalArgumentException("number of shards must be positive, got " + numberOfShards); - } - - if (numberOfShards == 1) { + if (singleShard) { // In the case of a single shard, we do not need to over-request return finalSize; } - // Request 50% more buckets on the shards in order to improve accuracy // as well as a small constant that should help with small values of 'size' final long shardSampleSize = (long) (finalSize * 1.5 + 10); return (int) Math.min(Integer.MAX_VALUE, shardSampleSize); } - } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java index e35bf376aae4d..810126e851251 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java @@ -209,7 +209,10 @@ protected AggregationBuilder doRewrite(QueryRewriteContext queryShardContext) th } } if (changed) { - return new FiltersAggregationBuilder(getName(), rewrittenFilters, this.keyed); + FiltersAggregationBuilder rewritten = new FiltersAggregationBuilder(getName(), rewrittenFilters, this.keyed); + rewritten.otherBucket(otherBucket); + rewritten.otherBucketKey(otherBucketKey); + return rewritten; } else { return this; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index 569845fcdf0e4..353f391f213d6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -157,7 +157,7 @@ public int shardSize() { if (shardSize < 0) { // Use default heuristic to avoid any wrong-ranking caused by // distributed counting - shardSize = BucketUtils.suggestShardSideQueueSize(requiredSize, context.numberOfShards()); + shardSize = BucketUtils.suggestShardSideQueueSize(requiredSize, context.numberOfShards() == 1); } if (requiredSize <= 0 || shardSize <= 0) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 50a0c85c041c8..b97670ddb5730 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -156,7 +156,7 @@ public int getNumBuckets() { return new AutoDateHistogramAggregatorFactory(name, config, numBuckets, roundings, context, parent, subFactoriesBuilder, metaData); } - private static Rounding createRounding(DateTimeUnit 
interval, DateTimeZone timeZone) { + static Rounding createRounding(DateTimeUnit interval, DateTimeZone timeZone) { Rounding.Builder tzRoundingBuilder = Rounding.builder(interval); if (timeZone != null) { tzRoundingBuilder.timeZone(timeZone); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java index 6a78ca6724988..7bc2b9a317838 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -418,7 +418,7 @@ private BucketReduceResult addEmptyBuckets(BucketReduceResult currentResult, Red return currentResult; } int roundingIdx = getAppropriateRounding(list.get(0).key, list.get(list.size() - 1).key, currentResult.roundingIdx, - bucketInfo.roundingInfos); + bucketInfo.roundingInfos, targetBuckets); RoundingInfo roundingInfo = bucketInfo.roundingInfos[roundingIdx]; Rounding rounding = roundingInfo.rounding; // merge buckets using the new rounding @@ -447,8 +447,8 @@ private BucketReduceResult addEmptyBuckets(BucketReduceResult currentResult, Red return new BucketReduceResult(list, roundingInfo, roundingIdx); } - private int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, - RoundingInfo[] roundings) { + static int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, + RoundingInfo[] roundings, int targetBuckets) { if (roundingIdx == roundings.length - 1) { return roundingIdx; } @@ -480,7 +480,7 @@ private int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, currentKey = currentRounding.nextRoundingValue(currentKey); } currentRoundingIdx++; - } while (requiredBuckets > (targetBuckets * roundings[roundingIdx].getMaximumInnerInterval()) + } while (requiredBuckets > (targetBuckets * roundings[currentRoundingIdx - 1].getMaximumInnerInterval()) && currentRoundingIdx < roundings.length); // The loop will increase past the correct rounding index here so we // need to subtract one to get the rounding index we need diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index df1bd115e2bfc..d612014e0177f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -195,7 +195,7 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator pare // such are impossible to differentiate from non-significant terms // at that early stage. 
bucketCountThresholds.setShardSize(2 * BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize(), - context.numberOfShards())); + context.numberOfShards() == 1)); } if (valuesSource instanceof ValuesSource.Bytes) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java index ea9a8a91aea9e..a51a33defdd00 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java @@ -176,7 +176,7 @@ protected Aggregator createInternal(Aggregator parent, boolean collectsFromSingl // such are impossible to differentiate from non-significant terms // at that early stage. bucketCountThresholds.setShardSize(2 * BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize(), - context.numberOfShards())); + context.numberOfShards() == 1)); } // TODO - need to check with mapping that this is indeed a text field.... diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java index 9e3012c5eb9d6..8154108f9f0bc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java @@ -36,7 +36,6 @@ import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -424,13 +423,8 @@ public IncludeExclude(StreamInput in) throws IOException { } else { excludeValues = null; } - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - incNumPartitions = in.readVInt(); - incZeroBasedPartition = in.readVInt(); - } else { - incNumPartitions = 0; - incZeroBasedPartition = 0; - } + incNumPartitions = in.readVInt(); + incZeroBasedPartition = in.readVInt(); } @Override @@ -457,10 +451,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBytesRef(value); } } - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeVInt(incNumPartitions); - out.writeVInt(incZeroBasedPartition); - } + out.writeVInt(incNumPartitions); + out.writeVInt(incZeroBasedPartition); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index a6481b58ca499..cc2719e5b9678 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -122,7 +122,7 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator pare // heuristic to avoid any wrong-ranking caused by distributed // counting bucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize(), - context.numberOfShards())); + context.numberOfShards() 
== 1)); } bucketCountThresholds.ensureValidity(); if (valuesSource instanceof ValuesSource.Bytes) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index f4281c063ff2c..db0993d12967d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -95,9 +95,6 @@ public InternalAggregation doReduce(List aggregations, Redu params.putAll(firstAggregation.reduceScript.getParams()); } - // Add _aggs to params map for backwards compatibility (redundant with a context variable on the ReduceScript created below). - params.put("_aggs", aggregationObjects); - ScriptedMetricAggContexts.ReduceScript.Factory factory = reduceContext.scriptService().compile( firstAggregation.reduceScript, ScriptedMetricAggContexts.ReduceScript.CONTEXT); ScriptedMetricAggContexts.ReduceScript script = factory.newInstance(params, aggregationObjects); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index ffdff44b783b6..ea7bf270b8b62 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -41,10 +41,10 @@ public class ScriptedMetricAggregator extends MetricsAggregator { private final ScriptedMetricAggContexts.MapScript.LeafFactory mapScript; private final ScriptedMetricAggContexts.CombineScript combineScript; private final Script reduceScript; - private Object aggState; + private Map aggState; protected ScriptedMetricAggregator(String name, ScriptedMetricAggContexts.MapScript.LeafFactory mapScript, ScriptedMetricAggContexts.CombineScript combineScript, - Script reduceScript, Object aggState, SearchContext context, Aggregator parent, + Script reduceScript, Map aggState, SearchContext context, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java index 9bd904a07013d..3b8f8321deaa8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java @@ -80,13 +80,7 @@ public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBu aggParams = new HashMap<>(); } - // Add _agg to params map for backwards compatibility (redundant with context variables on the scripts created below). - // When this is removed, aggState (as passed to ScriptedMetricAggregator) can be changed to Map, since - // it won't be possible to completely replace it with another type as is possible when it's an entry in params. 
- if (aggParams.containsKey("_agg") == false) { - aggParams.put("_agg", new HashMap()); - } - Object aggState = aggParams.get("_agg"); + Map aggState = new HashMap(); final ScriptedMetricAggContexts.InitScript initScript = this.initScript.newInstance( mergeParams(aggParams, initScriptParams), aggState); diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index c42a1a12a1877..92ae481a830dd 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -248,9 +248,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException { profile = in.readBoolean(); searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new); sliceBuilder = in.readOptionalWriteable(SliceBuilder::new); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - collapse = in.readOptionalWriteable(CollapseBuilder::new); - } + collapse = in.readOptionalWriteable(CollapseBuilder::new); if (in.getVersion().onOrAfter(Version.V_6_0_0_beta1)) { trackTotalHits = in.readBoolean(); } else { @@ -313,9 +311,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(profile); out.writeOptionalWriteable(searchAfterBuilder); out.writeOptionalWriteable(sliceBuilder); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeOptionalWriteable(collapse); - } + out.writeOptionalWriteable(collapse); if (out.getVersion().onOrAfter(Version.V_6_0_0_beta1)) { out.writeBoolean(trackTotalHits); } @@ -1154,9 +1150,7 @@ public void parseXContent(XContentParser parser, boolean checkTrailingTokens) th } } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { if (from != -1) { builder.field(FROM_FIELD.getPreferredName(), from); } @@ -1294,6 +1288,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (collapse != null) { builder.field(COLLAPSE.getPreferredName(), collapse); } + return builder; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerToXContent(builder, params); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java index ccab5e2cb93b3..2ebf413b1405d 100644 --- a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.collapse; import org.apache.lucene.index.IndexOptions; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -94,31 +93,14 @@ public CollapseBuilder(String field) { public CollapseBuilder(StreamInput in) throws IOException { this.field = in.readString(); this.maxConcurrentGroupRequests = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - this.innerHits = in.readList(InnerHitBuilder::new); - } else { - InnerHitBuilder innerHitBuilder = in.readOptionalWriteable(InnerHitBuilder::new); - if (innerHitBuilder != null) { - 
this.innerHits = Collections.singletonList(innerHitBuilder); - } else { - this.innerHits = Collections.emptyList(); - } - } + this.innerHits = in.readList(InnerHitBuilder::new); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(field); out.writeVInt(maxConcurrentGroupRequests); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeList(innerHits); - } else { - boolean hasInnerHit = innerHits.isEmpty() == false; - out.writeBoolean(hasInnerHit); - if (hasInnerHit) { - innerHits.get(0).writeToCollapseBWC(out); - } - } + out.writeList(innerHits); } public static CollapseBuilder fromXContent(XContentParser parser) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java index 2da74c56f6a33..a7f333abfa2ef 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java @@ -57,6 +57,7 @@ public void hitExecute(SearchContext context, HitContext hitContext) { if (nestedHit) { value = getNestedSource((Map) value, hitContext); } + try { final int initialCapacity = nestedHit ? 1024 : Math.min(1024, source.internalSourceRef().length()); BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity); @@ -81,6 +82,9 @@ public void hitExecute(SearchContext context, HitContext hitContext) { private Map getNestedSource(Map sourceAsMap, HitContext hitContext) { for (SearchHit.NestedIdentity o = hitContext.hit().getNestedIdentity(); o != null; o = o.getChild()) { sourceAsMap = (Map) sourceAsMap.get(o.getField().string()); + if (sourceAsMap == null) { + return null; + } } return sourceAsMap; } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java index 7888f6cd5a098..161ca9279f094 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java @@ -21,7 +21,6 @@ import org.apache.lucene.search.highlight.SimpleFragmenter; import org.apache.lucene.search.highlight.SimpleSpanFragmenter; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -152,17 +151,13 @@ protected AbstractHighlighterBuilder(StreamInput in) throws IOException { order(in.readOptionalWriteable(Order::readFromStream)); highlightFilter(in.readOptionalBoolean()); forceSource(in.readOptionalBoolean()); - if (in.getVersion().onOrAfter(Version.V_5_4_0)) { - boundaryScannerType(in.readOptionalWriteable(BoundaryScannerType::readFromStream)); - } + boundaryScannerType(in.readOptionalWriteable(BoundaryScannerType::readFromStream)); boundaryMaxScan(in.readOptionalVInt()); if (in.readBoolean()) { boundaryChars(in.readString().toCharArray()); } - if (in.getVersion().onOrAfter(Version.V_5_4_0)) { - if (in.readBoolean()) { - boundaryScannerLocale(in.readString()); - } + if (in.readBoolean()) { + boundaryScannerLocale(in.readString()); } noMatchSize(in.readOptionalVInt()); phraseLimit(in.readOptionalVInt()); @@ -191,21 +186,17 @@ public final void writeTo(StreamOutput out) throws IOException { 
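As context for the FetchSourceSubPhase change above, which adds a null check while walking the nested identity path, here is a minimal sketch of the same defensive walk over plain maps. The class name, the List of path segments, and the sample document are invented stand-ins for SearchHit.NestedIdentity and the real _source structures.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative only: walk a nested path through a source map and bail out with null
// as soon as a segment is missing, instead of dereferencing a missing nested object.
public final class NestedSourceWalkSketch {

    @SuppressWarnings("unchecked")
    static Map<String, Object> getNestedSource(Map<String, Object> sourceAsMap, List<String> path) {
        for (String field : path) {
            Object next = sourceAsMap.get(field);
            if (next == null) {
                return null; // nested object absent in _source: nothing to extract
            }
            sourceAsMap = (Map<String, Object>) next;
        }
        return sourceAsMap;
    }

    public static void main(String[] args) {
        Map<String, Object> address = new HashMap<>();
        address.put("city", "Berlin");
        Map<String, Object> user = new HashMap<>();
        user.put("address", address);
        Map<String, Object> source = new HashMap<>();
        source.put("user", user);
        System.out.println(getNestedSource(source, Arrays.asList("user", "address"))); // {city=Berlin}
        System.out.println(getNestedSource(source, Arrays.asList("user", "phones")));  // null
    }
}

Returning null as soon as a segment is absent avoids dereferencing a missing nested object later in the fetch phase.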
out.writeOptionalWriteable(order); out.writeOptionalBoolean(highlightFilter); out.writeOptionalBoolean(forceSource); - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeOptionalWriteable(boundaryScannerType); - } + out.writeOptionalWriteable(boundaryScannerType); out.writeOptionalVInt(boundaryMaxScan); boolean hasBounaryChars = boundaryChars != null; out.writeBoolean(hasBounaryChars); if (hasBounaryChars) { out.writeString(String.valueOf(boundaryChars)); } - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - boolean hasBoundaryScannerLocale = boundaryScannerLocale != null; - out.writeBoolean(hasBoundaryScannerLocale); - if (hasBoundaryScannerLocale) { - out.writeString(boundaryScannerLocale.toLanguageTag()); - } + boolean hasBoundaryScannerLocale = boundaryScannerLocale != null; + out.writeBoolean(hasBoundaryScannerLocale); + if (hasBoundaryScannerLocale) { + out.writeString(boundaryScannerLocale.toLanguageTag()); } out.writeOptionalVInt(noMatchSize); out.writeOptionalVInt(phraseLimit); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java index 049de439ac750..9483e76d072a8 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java @@ -220,7 +220,7 @@ public HighlightBuilder tagsSchema(String schemaName) { /** * Set encoder for the highlighting - * are {@code styled} and {@code default}. + * are {@code html} and {@code default}. * * @param encoder name */ diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index cf656ed3b9cb2..72a12b805eb17 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -35,7 +35,6 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import java.io.IOException; -import java.util.Optional; /** * Shard level search request that gets created and consumed on the local node. @@ -213,25 +212,10 @@ protected void innerReadFrom(StreamInput in) throws IOException { source = in.readOptionalWriteable(SearchSourceBuilder::new); types = in.readStringArray(); aliasFilter = new AliasFilter(in); - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - indexBoost = in.readFloat(); - } else { - // Nodes < 5.2.0 doesn't send index boost. Read it from source. - if (source != null) { - Optional boost = source.indexBoosts() - .stream() - .filter(ib -> ib.getIndex().equals(shardId.getIndexName())) - .findFirst(); - indexBoost = boost.isPresent() ? 
boost.get().getBoost() : 1.0f; - } else { - indexBoost = 1.0f; - } - } + indexBoost = in.readFloat(); nowInMillis = in.readVLong(); requestCache = in.readOptionalBoolean(); - if (in.getVersion().onOrAfter(Version.V_5_6_0)) { - clusterAlias = in.readOptionalString(); - } + clusterAlias = in.readOptionalString(); if (in.getVersion().onOrAfter(Version.V_6_3_0)) { allowPartialSearchResults = in.readOptionalBoolean(); } @@ -254,16 +238,12 @@ protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException out.writeOptionalWriteable(source); out.writeStringArray(types); aliasFilter.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeFloat(indexBoost); - } + out.writeFloat(indexBoost); if (asKey == false) { out.writeVLong(nowInMillis); } out.writeOptionalBoolean(requestCache); - if (out.getVersion().onOrAfter(Version.V_5_6_0)) { - out.writeOptionalString(clusterAlias); - } + out.writeOptionalString(clusterAlias); if (out.getVersion().onOrAfter(Version.V_6_3_0)) { out.writeOptionalBoolean(allowPartialSearchResults); } diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index ca06005448c0d..84c76e85f3dd0 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -95,7 +95,7 @@ public void execute(SearchContext searchContext) throws QueryPhaseExecutionExcep suggestPhase.execute(searchContext); // TODO: fix this once we can fetch docs for suggestions searchContext.queryResult().topDocs( - new TopDocs(0, Lucene.EMPTY_SCORE_DOCS, 0), + new TopDocs(0, Lucene.EMPTY_SCORE_DOCS, Float.NaN), new DocValueFormat[0]); return; } diff --git a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java index dc110b2797710..8d40cc802fffd 100644 --- a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java @@ -120,7 +120,7 @@ Collector create(Collector in) { @Override void postProcess(QuerySearchResult result) { final int totalHitCount = hitCountSupplier.getAsInt(); - result.topDocs(new TopDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS, 0), null); + result.topDocs(new TopDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS, Float.NaN), null); } } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java index 1aa82eeb2190a..0d0c7e9458910 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.suggest.completion.context; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -28,6 +29,8 @@ import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.CompletionFieldMapper; +import org.elasticsearch.index.mapper.FieldMapper; +import 
org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.ParseContext; import java.io.IOException; @@ -35,6 +38,7 @@ import java.util.List; import java.util.Objects; import java.util.Set; +import java.util.function.Function; /** * A {@link ContextMapping} defines criteria that can be used to @@ -131,6 +135,31 @@ public final List parseQueryContext(XContentParser parser) */ protected abstract XContentBuilder toInnerXContent(XContentBuilder builder, Params params) throws IOException; + /** + * Checks if the current context is consistent with the rest of the fields. For example, the GeoContext + * should check that the field that it points to has the correct type. + */ + protected void validateReferences(Version indexVersionCreated, Function fieldResolver) { + // No validation is required by default + } + + /** + * Verifies that all field paths specified in contexts point to the fields with correct mappings + */ + public static void validateContextPaths(Version indexVersionCreated, List fieldMappers, + Function fieldResolver) { + for (FieldMapper fieldMapper : fieldMappers) { + if (CompletionFieldMapper.CONTENT_TYPE.equals(fieldMapper.typeName())) { + CompletionFieldMapper.CompletionFieldType fieldType = ((CompletionFieldMapper) fieldMapper).fieldType(); + if (fieldType.hasContextMappings()) { + for (ContextMapping context : fieldType.getContextMappings()) { + context.validateReferences(indexVersionCreated, fieldResolver); + } + } + } + } + } + @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(FIELD_NAME, name); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java index 3c0f0e80cebdb..b4c3276b946b2 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java @@ -37,6 +37,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -50,7 +51,7 @@ * and creates context queries for defined {@link ContextMapping}s * for a {@link CompletionFieldMapper} */ -public class ContextMappings implements ToXContent { +public class ContextMappings implements ToXContent, Iterable> { private final List> contextMappings; private final Map> contextNameMap; @@ -97,6 +98,11 @@ public void addField(ParseContext.Document document, String name, String input, document.add(new TypedContextField(name, input, weight, contexts, document)); } + @Override + public Iterator> iterator() { + return contextMappings.iterator(); + } + /** * Field prepends context values with a suggestion * Context values are associated with a type, denoted by diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java index 48aaf705099da..938c4963620ed 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java @@ -19,12 +19,17 @@ package org.elasticsearch.search.suggest.completion.context; +import org.apache.logging.log4j.LogManager; 
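The ContextMapping changes above introduce validateReferences and validateContextPaths so that completion-suggester contexts can check the fields they point at. Below is a rough, self-contained sketch of that validation pattern; SimpleContextMapping, the expectedType strings, and the sample mapping are simplified stand-ins rather than the Elasticsearch types, and the real change (see the GeoContextMapping hunk that follows) only throws for indices created on or after 7.0.0-alpha1 while logging a deprecation warning for older ones.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

// Illustrative only: each context mapping resolves the field it references through a lookup
// function and complains when that field is absent or not of the expected type.
public final class ContextPathValidationSketch {

    static final class SimpleContextMapping {
        final String name;
        final String referencedField;
        final String expectedType;

        SimpleContextMapping(String name, String referencedField, String expectedType) {
            this.name = name;
            this.referencedField = referencedField;
            this.expectedType = expectedType;
        }

        void validateReferences(Function<String, String> fieldTypeResolver) {
            String actualType = fieldTypeResolver.apply(referencedField);
            if (actualType == null) {
                throw new IllegalArgumentException(
                    "field [" + referencedField + "] referenced in context [" + name + "] is not defined in the mapping");
            }
            if (expectedType.equals(actualType) == false) {
                throw new IllegalArgumentException(
                    "field [" + referencedField + "] referenced in context [" + name + "] must be mapped to ["
                        + expectedType + "], found [" + actualType + "]");
            }
        }
    }

    public static void main(String[] args) {
        Map<String, String> mapping = new HashMap<>();
        mapping.put("location", "geo_point");

        List<SimpleContextMapping> contexts = Arrays.asList(
            new SimpleContextMapping("loc", "location", "geo_point"),
            new SimpleContextMapping("place", "place_field", "geo_point"));

        for (SimpleContextMapping context : contexts) {
            try {
                context.validateReferences(mapping::get);
                System.out.println("context [" + context.name + "] is valid");
            } catch (IllegalArgumentException e) {
                System.out.println("invalid: " + e.getMessage());
            }
        }
    }
}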
+import org.apache.lucene.document.LatLonDocValuesField; +import org.apache.lucene.document.LatLonPoint; import org.apache.lucene.document.StringField; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -42,6 +47,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.Function; import java.util.stream.Collectors; import static org.elasticsearch.common.geo.GeoHashUtils.addNeighbors; @@ -69,6 +75,8 @@ public class GeoContextMapping extends ContextMapping { static final String CONTEXT_PRECISION = "precision"; static final String CONTEXT_NEIGHBOURS = "neighbours"; + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(GeoContextMapping.class)); + private final int precision; private final String fieldName; @@ -205,11 +213,11 @@ public Set parseContext(Document document) { for (IndexableField field : fields) { if (field instanceof StringField) { spare.resetFromString(field.stringValue()); - } else { - // todo return this to .stringValue() once LatLonPoint implements it + geohashes.add(spare.geohash()); + } else if (field instanceof LatLonPoint || field instanceof LatLonDocValuesField) { spare.resetFromIndexableField(field); + geohashes.add(spare.geohash()); } - geohashes.add(spare.geohash()); } } } @@ -279,6 +287,32 @@ public List toInternalQueryContexts(List return internalQueryContextList; } + @Override + protected void validateReferences(Version indexVersionCreated, Function fieldResolver) { + if (fieldName != null) { + MappedFieldType mappedFieldType = fieldResolver.apply(fieldName); + if (mappedFieldType == null) { + if (indexVersionCreated.before(Version.V_7_0_0_alpha1)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("geo_context_mapping", + "field [{}] referenced in context [{}] is not defined in the mapping", fieldName, name); + } else { + throw new ElasticsearchParseException( + "field [{}] referenced in context [{}] is not defined in the mapping", fieldName, name); + } + } else if (GeoPointFieldMapper.CONTENT_TYPE.equals(mappedFieldType.typeName()) == false) { + if (indexVersionCreated.before(Version.V_7_0_0_alpha1)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("geo_context_mapping", + "field [{}] referenced in context [{}] must be mapped to geo_point, found [{}]", + fieldName, name, mappedFieldType.typeName()); + } else { + throw new ElasticsearchParseException( + "field [{}] referenced in context [{}] must be mapped to geo_point, found [{}]", + fieldName, name, mappedFieldType.typeName()); + } + } + } + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index a7df9bdfdfd87..6acdbad2ccec9 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -64,6 +64,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import 
org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; @@ -120,7 +121,8 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp SETTING_NUMBER_OF_SHARDS, SETTING_VERSION_CREATED, SETTING_INDEX_UUID, - SETTING_CREATION_DATE)); + SETTING_CREATION_DATE, + IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey())); // It's OK to change some settings, but we shouldn't allow simply removing them private static final Set UNREMOVABLE_SETTINGS; @@ -292,6 +294,7 @@ public ClusterState execute(ClusterState currentState) { // Index exists and it's closed - open it in metadata and start recovery IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN); indexMdBuilder.version(Math.max(snapshotIndexMetaData.getVersion(), currentIndexMetaData.getVersion() + 1)); + indexMdBuilder.mappingVersion(Math.max(snapshotIndexMetaData.getMappingVersion(), currentIndexMetaData.getMappingVersion() + 1)); if (!request.includeAliases()) { // Remove all snapshot aliases if (!snapshotIndexMetaData.getAliases().isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index 67ddabc37fa30..fdbe74d8d4dd9 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -76,9 +76,7 @@ public final class SnapshotInfo implements Comparable, ToXContent, private static final String SUCCESSFUL_SHARDS = "successful_shards"; private static final String INCLUDE_GLOBAL_STATE = "include_global_state"; - private static final Version VERSION_INCOMPATIBLE_INTRODUCED = Version.V_5_2_0; private static final Version INCLUDE_GLOBAL_STATE_INTRODUCED = Version.V_6_2_0; - public static final Version VERBOSE_INTRODUCED = Version.V_5_5_0; private static final Comparator COMPARATOR = Comparator.comparing(SnapshotInfo::startTime).thenComparing(SnapshotInfo::snapshotId); @@ -275,11 +273,7 @@ public SnapshotInfo(final StreamInput in) throws IOException { indicesListBuilder.add(in.readString()); } indices = Collections.unmodifiableList(indicesListBuilder); - if (in.getVersion().onOrAfter(VERBOSE_INTRODUCED)) { - state = in.readBoolean() ? SnapshotState.fromValue(in.readByte()) : null; - } else { - state = SnapshotState.fromValue(in.readByte()); - } + state = in.readBoolean() ? SnapshotState.fromValue(in.readByte()) : null; reason = in.readOptionalString(); startTime = in.readVLong(); endTime = in.readVLong(); @@ -295,11 +289,7 @@ public SnapshotInfo(final StreamInput in) throws IOException { } else { shardFailures = Collections.emptyList(); } - if (in.getVersion().before(VERSION_INCOMPATIBLE_INTRODUCED)) { - version = Version.readVersion(in); - } else { - version = in.readBoolean() ? Version.readVersion(in) : null; - } + version = in.readBoolean() ? 
Version.readVersion(in) : null; if (in.getVersion().onOrAfter(INCLUDE_GLOBAL_STATE_INTRODUCED)) { includeGlobalState = in.readOptionalBoolean(); } @@ -681,19 +671,11 @@ public void writeTo(final StreamOutput out) throws IOException { for (String index : indices) { out.writeString(index); } - if (out.getVersion().onOrAfter(VERBOSE_INTRODUCED)) { - if (state != null) { - out.writeBoolean(true); - out.writeByte(state.value()); - } else { - out.writeBoolean(false); - } + if (state != null) { + out.writeBoolean(true); + out.writeByte(state.value()); } else { - if (out.getVersion().before(VERSION_INCOMPATIBLE_INTRODUCED) && state == SnapshotState.INCOMPATIBLE) { - out.writeByte(SnapshotState.FAILED.value()); - } else { - out.writeByte(state.value()); - } + out.writeBoolean(false); } out.writeOptionalString(reason); out.writeVLong(startTime); @@ -704,19 +686,11 @@ public void writeTo(final StreamOutput out) throws IOException { for (SnapshotShardFailure failure : shardFailures) { failure.writeTo(out); } - if (out.getVersion().before(VERSION_INCOMPATIBLE_INTRODUCED)) { - Version versionToWrite = version; - if (versionToWrite == null) { - versionToWrite = Version.CURRENT; - } - Version.writeVersion(versionToWrite, out); + if (version != null) { + out.writeBoolean(true); + Version.writeVersion(version, out); } else { - if (version != null) { - out.writeBoolean(true); - Version.writeVersion(version, out); - } else { - out.writeBoolean(false); - } + out.writeBoolean(false); } if (out.getVersion().onOrAfter(INCLUDE_GLOBAL_STATE_INTRODUCED)) { out.writeOptionalBoolean(includeGlobalState); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java index f2bdc2ba5df4f..67bf9c6069fe8 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java @@ -102,7 +102,7 @@ public void readFrom(StreamInput in) throws IOException { nodeId = in.readOptionalString(); shardId = ShardId.readShardId(in); super.shardId = shardId.getId(); - super.index = shardId.getIndexName(); + index = shardId.getIndexName(); reason = in.readString(); status = RestStatus.readFrom(in); } diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java index 1ff8b701a83e1..da8faaf3c33f4 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java @@ -38,12 +38,12 @@ import java.io.Closeable; import java.io.IOException; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -62,6 +62,7 @@ public class ConnectionManager implements Closeable { private final TimeValue pingSchedule; private final ConnectionProfile defaultProfile; private final Lifecycle lifecycle = new Lifecycle(); + private final AtomicBoolean closed = new AtomicBoolean(false); private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); private final DelegatingNodeConnectionListener connectionListener = new 
DelegatingNodeConnectionListener(); @@ -83,7 +84,7 @@ public ConnectionManager(Settings settings, Transport transport, ThreadPool thre } public void addListener(TransportConnectionListener listener) { - this.connectionListener.listeners.add(listener); + this.connectionListener.listeners.addIfAbsent(listener); } public void removeListener(TransportConnectionListener listener) { @@ -91,7 +92,8 @@ public void removeListener(TransportConnectionListener listener) { } public Transport.Connection openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) { - return transport.openConnection(node, ConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile)); + ConnectionProfile resolvedProfile = ConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile); + return internalOpenConnection(node, resolvedProfile); } /** @@ -115,7 +117,7 @@ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfil } boolean success = false; try { - connection = transport.openConnection(node, resolvedProfile); + connection = internalOpenConnection(node, resolvedProfile); connectionValidator.accept(connection, resolvedProfile); // we acquire a connection lock, so no way there is an existing connection connectedNodes.put(node, connection); @@ -185,46 +187,64 @@ public void disconnectFromNode(DiscoveryNode node) { } } - public int connectedNodeCount() { + /** + * Returns the number of nodes this manager is connected to. + */ + public int size() { return connectedNodes.size(); } @Override public void close() { - lifecycle.moveToStopped(); - CountDownLatch latch = new CountDownLatch(1); + if (closed.compareAndSet(false, true)) { + lifecycle.moveToStopped(); + CountDownLatch latch = new CountDownLatch(1); - // TODO: Consider moving all read/write lock (in Transport and this class) to the TransportService - threadPool.generic().execute(() -> { - closeLock.writeLock().lock(); - try { - // we are holding a write lock so nobody modifies the connectedNodes / openConnections map - it's safe to first close - // all instances and then clear them maps - Iterator> iterator = connectedNodes.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry next = iterator.next(); - try { - IOUtils.closeWhileHandlingException(next.getValue()); - } finally { - iterator.remove(); + // TODO: Consider moving all read/write lock (in Transport and this class) to the TransportService + threadPool.generic().execute(() -> { + closeLock.writeLock().lock(); + try { + // we are holding a write lock so nobody modifies the connectedNodes / openConnections map - it's safe to first close + // all instances and then clear them maps + Iterator> iterator = connectedNodes.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry next = iterator.next(); + try { + IOUtils.closeWhileHandlingException(next.getValue()); + } finally { + iterator.remove(); + } } + } finally { + closeLock.writeLock().unlock(); + latch.countDown(); + } + }); + + try { + try { + latch.await(30, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // ignore } } finally { - closeLock.writeLock().unlock(); - latch.countDown(); + lifecycle.moveToClosed(); } - }); + } + } + private Transport.Connection internalOpenConnection(DiscoveryNode node, ConnectionProfile connectionProfile) { + Transport.Connection connection = transport.openConnection(node, connectionProfile); try { - try { - latch.await(30, TimeUnit.SECONDS); - } catch (InterruptedException e) { - 
Thread.currentThread().interrupt(); - // ignore - } + connectionListener.onConnectionOpened(connection); } finally { - lifecycle.moveToClosed(); + connection.addCloseListener(ActionListener.wrap(() -> connectionListener.onConnectionClosed(connection))); } + if (connection.isClosed()) { + throw new ConnectTransportException(node, "a channel closed while connecting"); + } + return connection; } private void ensureOpen() { @@ -274,7 +294,7 @@ public void onFailure(Exception e) { private static final class DelegatingNodeConnectionListener implements TransportConnectionListener { - private final List listeners = new CopyOnWriteArrayList<>(); + private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); @Override public void onNodeDisconnected(DiscoveryNode key) { @@ -289,6 +309,20 @@ public void onNodeConnected(DiscoveryNode node) { listener.onNodeConnected(node); } } + + @Override + public void onConnectionOpened(Transport.Connection connection) { + for (TransportConnectionListener listener : listeners) { + listener.onConnectionOpened(connection); + } + } + + @Override + public void onConnectionClosed(Transport.Connection connection) { + for (TransportConnectionListener listener : listeners) { + listener.onConnectionClosed(connection); + } + } } static ConnectionProfile buildDefaultConnectionProfile(Settings settings) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index 107a4b32d89f9..16d3c292bfe32 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -18,9 +18,14 @@ */ package org.elasticsearch.transport; +import java.util.EnumSet; +import java.util.function.Supplier; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -48,12 +53,39 @@ public abstract class RemoteClusterAware extends AbstractComponent { /** * A list of initial seed nodes to discover eligible nodes from the remote cluster */ - public static final Setting.AffixSetting> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting("search.remote.", - "seeds", (key) -> Setting.listSetting(key, Collections.emptyList(), RemoteClusterAware::parseSeedAddress, - Setting.Property.NodeScope, Setting.Property.Dynamic)); + public static final Setting.AffixSetting> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting( + "search.remote.", + "seeds", + key -> Setting.listSetting( + key, Collections.emptyList(), + s -> { + // validate seed address + parsePort(s); + return s; + }, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + ); public static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':'; public static final String LOCAL_CLUSTER_GROUP_KEY = ""; + /** + * A proxy address for the remote cluster. + * NOTE: this setting is undocumented until we have at least one transport that supports passing + on the hostname via a mechanism like SNI.
+ */ + public static final Setting.AffixSetting REMOTE_CLUSTERS_PROXY = Setting.affixKeySetting( + "search.remote.", + "proxy", + key -> Setting.simpleString(key, s -> { + if (Strings.hasLength(s)) { + parsePort(s); + } + return s; + }, Setting.Property.NodeScope, Setting.Property.Dynamic), REMOTE_CLUSTERS_SEEDS); + + protected final ClusterNameExpressionResolver clusterNameResolver; /** @@ -65,23 +97,42 @@ protected RemoteClusterAware(Settings settings) { this.clusterNameResolver = new ClusterNameExpressionResolver(settings); } - protected static Map> buildRemoteClustersSeeds(Settings settings) { - Stream>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings); + /** + * Builds the dynamic per-cluster config from the given settings. This is a map keyed by the cluster alias that points to a tuple + * (ProxyAddresss, [SeedNodeSuppliers]). If a cluster is configured with a proxy address all seed nodes will point to + * {@link TransportAddress#META_ADDRESS} and their configured address will be used as the hostname for the generated discovery node. + */ + protected static Map>>> buildRemoteClustersDynamicConfig(Settings settings) { + Stream>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings); return allConcreteSettings.collect( Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> { String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting); - List nodes = new ArrayList<>(); - for (InetSocketAddress address : concreteSetting.get(settings)) { - TransportAddress transportAddress = new TransportAddress(address); - DiscoveryNode node = new DiscoveryNode(clusterName + "#" + transportAddress.toString(), - transportAddress, - Version.CURRENT.minimumCompatibilityVersion()); - nodes.add(node); + List addresses = concreteSetting.get(settings); + final boolean proxyMode = REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterName).exists(settings); + List> nodes = new ArrayList<>(addresses.size()); + for (String address : addresses) { + nodes.add(() -> buildSeedNode(clusterName, address, proxyMode)); } - return nodes; + return new Tuple<>(REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterName).get(settings), nodes); })); } + static DiscoveryNode buildSeedNode(String clusterName, String address, boolean proxyMode) { + if (proxyMode) { + TransportAddress transportAddress = new TransportAddress(TransportAddress.META_ADDRESS, 0); + String hostName = address.substring(0, indexOfPortSeparator(address)); + return new DiscoveryNode("", clusterName + "#" + address, UUIDs.randomBase64UUID(), hostName, address, + transportAddress, Collections + .emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class), + Version.CURRENT.minimumCompatibilityVersion()); + } else { + TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address)); + return new DiscoveryNode(clusterName + "#" + transportAddress.toString(), + transportAddress, + Version.CURRENT.minimumCompatibilityVersion()); + } + } + /** * Groups indices per cluster by splitting remote cluster-alias, index-name pairs on {@link #REMOTE_CLUSTER_INDEX_SEPARATOR}. All * indices per cluster are collected as a list in the returned map keyed by the cluster alias. Local indices are grouped under @@ -124,41 +175,53 @@ public Map> groupClusterIndices(String[] requestIndices, Pr protected abstract Set getRemoteClusterNames(); + /** * Subclasses must implement this to receive information about updated cluster aliases. 
If the given address list is * empty the cluster alias is unregistered and should be removed. */ - protected abstract void updateRemoteCluster(String clusterAlias, List addresses); + protected abstract void updateRemoteCluster(String clusterAlias, List addresses, String proxy); /** * Registers this instance to listen to updates on the cluster settings. */ public void listenForUpdates(ClusterSettings clusterSettings) { - clusterSettings.addAffixUpdateConsumer(RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, this::updateRemoteCluster, + clusterSettings.addAffixUpdateConsumer(RemoteClusterAware.REMOTE_CLUSTERS_PROXY, + RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, + (key, value) -> updateRemoteCluster(key, value.v2(), value.v1()), (namespace, value) -> {}); } - private static InetSocketAddress parseSeedAddress(String remoteHost) { - int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. [::1]:9300 - if (portSeparator == -1 || portSeparator == remoteHost.length()) { - throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead"); - } - String host = remoteHost.substring(0, portSeparator); + + protected static InetSocketAddress parseSeedAddress(String remoteHost) { + String host = remoteHost.substring(0, indexOfPortSeparator(remoteHost)); InetAddress hostAddress; try { hostAddress = InetAddress.getByName(host); } catch (UnknownHostException e) { throw new IllegalArgumentException("unknown host [" + host + "]", e); } + return new InetSocketAddress(hostAddress, parsePort(remoteHost)); + } + + private static int parsePort(String remoteHost) { try { - int port = Integer.valueOf(remoteHost.substring(portSeparator + 1)); + int port = Integer.valueOf(remoteHost.substring(indexOfPortSeparator(remoteHost) + 1)); if (port <= 0) { throw new IllegalArgumentException("port number must be > 0 but was: [" + port + "]"); } - return new InetSocketAddress(hostAddress, port); + return port; } catch (NumberFormatException e) { - throw new IllegalArgumentException("port must be a number", e); + throw new IllegalArgumentException("failed to parse port", e); + } + } + + private static int indexOfPortSeparator(String remoteHost) { + int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. 
[::1]:9300 + if (portSeparator == -1 || portSeparator == remoteHost.length()) { + throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead"); } + return portSeparator; } public static String buildRemoteIndexName(String clusterAlias, String indexName) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 67c0e1a5aa64a..6b1909434655f 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.transport; +import java.net.InetSocketAddress; +import java.util.function.Supplier; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.SetOnce; @@ -34,6 +36,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -79,16 +82,18 @@ final class RemoteClusterConnection extends AbstractComponent implements TransportConnectionListener, Closeable { private final TransportService transportService; + private final ConnectionManager connectionManager; private final ConnectionProfile remoteProfile; private final ConnectedNodes connectedNodes; private final String clusterAlias; private final int maxNumRemoteConnections; private final Predicate nodePredicate; - private volatile List seedNodes; + private final ThreadPool threadPool; + private volatile String proxyAddress; + private volatile List> seedNodes; private volatile boolean skipUnavailable; private final ConnectHandler connectHandler; private SetOnce remoteClusterName = new SetOnce<>(); - private final ClusterName localClusterName; /** * Creates a new {@link RemoteClusterConnection} @@ -96,13 +101,21 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo * @param clusterAlias the configured alias of the cluster to connect to * @param seedNodes a list of seed nodes to discover eligible nodes from * @param transportService the local nodes transport service + * @param connectionManager the connection manager to use for this remote connection * @param maxNumRemoteConnections the maximum number of connections to the remote cluster * @param nodePredicate a predicate to filter eligible remote nodes to connect to */ - RemoteClusterConnection(Settings settings, String clusterAlias, List seedNodes, - TransportService transportService, int maxNumRemoteConnections, Predicate nodePredicate) { + RemoteClusterConnection(Settings settings, String clusterAlias, List> seedNodes, + TransportService transportService, ConnectionManager connectionManager, int maxNumRemoteConnections, + Predicate nodePredicate) { + this(settings, clusterAlias, seedNodes, transportService, connectionManager, maxNumRemoteConnections, nodePredicate, null); + } + + RemoteClusterConnection(Settings settings, String clusterAlias, List> seedNodes, + TransportService transportService, ConnectionManager connectionManager, int maxNumRemoteConnections, Predicate + nodePredicate, + String proxyAddress) { 
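The seed-address parsing kept above splits on the last ':' so that bracketed IPv6 literals keep their colons on the host side. The following is a minimal stand-alone sketch of that splitting rule; the class name and the example addresses (elastic.example.com, [::1]) are invented for illustration and do not come from the patch.

// Illustrative only: split a remote-cluster seed address on its last ':' so that
// IPv6 literals such as "[::1]:9300" keep their colons on the host side.
public final class SeedAddressParsingSketch {

    static String host(String remoteHost) {
        return remoteHost.substring(0, indexOfPortSeparator(remoteHost));
    }

    static int port(String remoteHost) {
        int port = Integer.parseInt(remoteHost.substring(indexOfPortSeparator(remoteHost) + 1));
        if (port <= 0) {
            throw new IllegalArgumentException("port number must be > 0 but was: [" + port + "]");
        }
        return port;
    }

    private static int indexOfPortSeparator(String remoteHost) {
        int portSeparator = remoteHost.lastIndexOf(':');
        if (portSeparator == -1 || portSeparator == remoteHost.length() - 1) {
            throw new IllegalArgumentException(
                "remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
        }
        return portSeparator;
    }

    public static void main(String[] args) {
        System.out.println(host("elastic.example.com:9300") + " -> " + port("elastic.example.com:9300"));
        System.out.println(host("[::1]:9300") + " -> " + port("[::1]:9300"));
    }
}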
super(settings); - this.localClusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); this.transportService = transportService; this.maxNumRemoteConnections = maxNumRemoteConnections; this.nodePredicate = nodePredicate; @@ -121,14 +134,31 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo this.skipUnavailable = RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE .getConcreteSettingForNamespace(clusterAlias).get(settings); this.connectHandler = new ConnectHandler(); - transportService.addConnectionListener(this); + this.threadPool = transportService.threadPool; + this.connectionManager = connectionManager; + connectionManager.addListener(this); + // we register the transport service here as a listener to make sure we notify handlers on disconnect etc. + connectionManager.addListener(transportService); + this.proxyAddress = proxyAddress; + } + + private static DiscoveryNode maybeAddProxyAddress(String proxyAddress, DiscoveryNode node) { + if (proxyAddress == null || proxyAddress.isEmpty()) { + return node; + } else { + // resolve proxy address lazily here + InetSocketAddress proxyInetAddress = RemoteClusterAware.parseSeedAddress(proxyAddress); + return new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), node + .getHostAddress(), new TransportAddress(proxyInetAddress), node.getAttributes(), node.getRoles(), node.getVersion()); + } } /** * Updates the list of seed nodes for this cluster connection */ - synchronized void updateSeedNodes(List seedNodes, ActionListener connectListener) { + synchronized void updateSeedNodes(String proxyAddress, List> seedNodes, ActionListener connectListener) { this.seedNodes = Collections.unmodifiableList(new ArrayList<>(seedNodes)); + this.proxyAddress = proxyAddress; connectHandler.connect(connectListener); } @@ -182,8 +212,9 @@ public void ensureConnected(ActionListener voidActionListener) { private void fetchShardsInternal(ClusterSearchShardsRequest searchShardsRequest, final ActionListener listener) { - final DiscoveryNode node = connectedNodes.getAny(); - transportService.sendRequest(node, ClusterSearchShardsAction.NAME, searchShardsRequest, + final DiscoveryNode node = getAnyConnectedNode(); + Transport.Connection connection = connectionManager.getConnection(node); + transportService.sendRequest(connection, ClusterSearchShardsAction.NAME, searchShardsRequest, TransportRequestOptions.EMPTY, new TransportResponseHandler() { @Override @@ -218,12 +249,16 @@ void collectNodes(ActionListener> listener) { request.clear(); request.nodes(true); request.local(true); // run this on the node that gets the request it's as good as any other - final DiscoveryNode node = connectedNodes.getAny(); - transportService.sendRequest(node, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, + final DiscoveryNode node = getAnyConnectedNode(); + Transport.Connection connection = connectionManager.getConnection(node); + transportService.sendRequest(connection, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, new TransportResponseHandler() { + @Override - public ClusterStateResponse newInstance() { - return new ClusterStateResponse(); + public ClusterStateResponse read(StreamInput in) throws IOException { + ClusterStateResponse response = new ClusterStateResponse(); + response.readFrom(in); + return response; } @Override @@ -260,14 +295,15 @@ public String executor() { * If such node is not connected, the returned connection will be a proxy connection that redirects to it.
*/ Transport.Connection getConnection(DiscoveryNode remoteClusterNode) { - if (transportService.nodeConnected(remoteClusterNode)) { - return transportService.getConnection(remoteClusterNode); + if (connectionManager.nodeConnected(remoteClusterNode)) { + return connectionManager.getConnection(remoteClusterNode); } - DiscoveryNode discoveryNode = connectedNodes.getAny(); - Transport.Connection connection = transportService.getConnection(discoveryNode); + DiscoveryNode discoveryNode = getAnyConnectedNode(); + Transport.Connection connection = connectionManager.getConnection(discoveryNode); return new ProxyConnection(connection, remoteClusterNode); } + static final class ProxyConnection implements Transport.Connection { private final Transport.Connection proxyConnection; private final DiscoveryNode targetNode; @@ -316,33 +352,18 @@ public Version getVersion() { } Transport.Connection getConnection() { - return transportService.getConnection(getAnyConnectedNode()); + return connectionManager.getConnection(getAnyConnectedNode()); } @Override public void close() throws IOException { - connectHandler.close(); + IOUtils.close(connectHandler, connectionManager); } public boolean isClosed() { return connectHandler.isClosed(); } - private ConnectionProfile getRemoteProfile(ClusterName name) { - // we can only compare the cluster name to make a decision if we should use a remote profile - // we can't use a cluster UUID here since we could be connecting to that remote cluster before - // the remote node has joined its cluster and have a cluster UUID. The fact that we just lose a - // rather smallish optimization on the connection layer under certain situations where remote clusters - // have the same name as the local one is minor here. - // the alternative here is to complicate the remote infrastructure to also wait until we formed a cluster, - // gained a cluster UUID and then start connecting etc. we rather use this simplification in order to maintain simplicity - if (this.localClusterName.equals(name)) { - return null; - } else { - return remoteProfile; - } - } - /** * The connect handler manages node discovery and the actual connect to the remote cluster. * There is at most one connect job running at any time. If such a connect job is triggered @@ -386,7 +407,7 @@ private void connect(ActionListener connectListener, boolean forceRun) { final boolean runConnect; final Collection> toNotify; final ActionListener listener = connectListener == null ? 
null : - ContextPreservingActionListener.wrapPreservingContext(connectListener, transportService.getThreadPool().getThreadContext()); + ContextPreservingActionListener.wrapPreservingContext(connectListener, threadPool.getThreadContext()); synchronized (queue) { if (listener != null && queue.offer(listener) == false) { listener.onFailure(new RejectedExecutionException("connect queue is full")); @@ -414,7 +435,6 @@ private void connect(ActionListener connectListener, boolean forceRun) { } private void forkConnect(final Collection> toNotify) { - ThreadPool threadPool = transportService.getThreadPool(); ExecutorService executor = threadPool.executor(ThreadPool.Names.MANAGEMENT); executor.submit(new AbstractRunnable() { @Override @@ -451,22 +471,22 @@ protected void doRun() { maybeConnect(); } }); - collectRemoteNodes(seedNodes.iterator(), transportService, listener); + collectRemoteNodes(seedNodes.iterator(), transportService, connectionManager, listener); } }); } - void collectRemoteNodes(Iterator seedNodes, - final TransportService transportService, ActionListener listener) { + private void collectRemoteNodes(Iterator> seedNodes, + final TransportService transportService, final ConnectionManager manager, ActionListener listener) { if (Thread.currentThread().isInterrupted()) { listener.onFailure(new InterruptedException("remote connect thread got interrupted")); } try { if (seedNodes.hasNext()) { cancellableThreads.executeIO(() -> { - final DiscoveryNode seedNode = seedNodes.next(); + final DiscoveryNode seedNode = maybeAddProxyAddress(proxyAddress, seedNodes.next().get()); final TransportService.HandshakeResponse handshakeResponse; - Transport.Connection connection = transportService.openConnection(seedNode, + Transport.Connection connection = manager.openConnection(seedNode, ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, null, null)); boolean success = false; try { @@ -479,9 +499,9 @@ void collectRemoteNodes(Iterator seedNodes, throw ex; } - final DiscoveryNode handshakeNode = handshakeResponse.getDiscoveryNode(); + final DiscoveryNode handshakeNode = maybeAddProxyAddress(proxyAddress, handshakeResponse.getDiscoveryNode()); if (nodePredicate.test(handshakeNode) && connectedNodes.size() < maxNumRemoteConnections) { - transportService.connectToNode(handshakeNode, getRemoteProfile(handshakeResponse.getClusterName())); + manager.connectToNode(handshakeNode, remoteProfile, transportService.connectionValidator(handshakeNode)); if (remoteClusterName.get() == null) { assert handshakeResponse.getClusterName().value() != null; remoteClusterName.set(handshakeResponse.getClusterName()); @@ -523,7 +543,7 @@ void collectRemoteNodes(Iterator seedNodes, // ISE if we fail the handshake with an version incompatible node if (seedNodes.hasNext()) { logger.debug(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex); - collectRemoteNodes(seedNodes, transportService, listener); + collectRemoteNodes(seedNodes, transportService, manager, listener); } else { listener.onFailure(ex); } @@ -551,16 +571,14 @@ final boolean isClosed() { /* This class handles the _state response from the remote cluster when sniffing nodes to connect to */ private class SniffClusterStateResponseHandler implements TransportResponseHandler { - private final TransportService transportService; private final Transport.Connection connection; private final ActionListener listener; - private final Iterator seedNodes; + private final Iterator> seedNodes; private final 
CancellableThreads cancellableThreads; SniffClusterStateResponseHandler(TransportService transportService, Transport.Connection connection, - ActionListener listener, Iterator seedNodes, + ActionListener listener, Iterator> seedNodes, CancellableThreads cancellableThreads) { - this.transportService = transportService; this.connection = connection; this.listener = listener; this.seedNodes = seedNodes; @@ -588,11 +606,12 @@ public void handleResponse(ClusterStateResponse response) { cancellableThreads.executeIO(() -> { DiscoveryNodes nodes = response.getState().nodes(); Iterable nodesIter = nodes.getNodes()::valuesIt; - for (DiscoveryNode node : nodesIter) { + for (DiscoveryNode n : nodesIter) { + DiscoveryNode node = maybeAddProxyAddress(proxyAddress, n); if (nodePredicate.test(node) && connectedNodes.size() < maxNumRemoteConnections) { try { - transportService.connectToNode(node, getRemoteProfile(remoteClusterName.get())); // noop if node is - // connected + connectionManager.connectToNode(node, remoteProfile, + transportService.connectionValidator(node)); // noop if node is connected connectedNodes.add(node); } catch (ConnectTransportException | IllegalStateException ex) { // ISE if we fail the handshake with an version incompatible node @@ -608,7 +627,7 @@ public void handleResponse(ClusterStateResponse response) { listener.onFailure(ex); // we got canceled - fail the listener and step out } catch (Exception ex) { logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex); - collectRemoteNodes(seedNodes, transportService, listener); + collectRemoteNodes(seedNodes, transportService, connectionManager, listener); } } @@ -619,7 +638,7 @@ public void handleException(TransportException exp) { IOUtils.closeWhileHandlingException(connection); } finally { // once the connection is closed lets try the next node - collectRemoteNodes(seedNodes, transportService, listener); + collectRemoteNodes(seedNodes, transportService, connectionManager, listener); } } @@ -651,7 +670,8 @@ void addConnectedNode(DiscoveryNode node) { * Get the information about remote nodes to be rendered on {@code _remote/info} requests. 
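
The hunks above move the remote-cluster connect path from `TransportService` onto a dedicated `ConnectionManager`, resolve seed nodes lazily through `Supplier<DiscoveryNode>` (optionally rewriting them with a proxy address), and fall back to the next seed whenever a connect or handshake attempt fails. As a rough illustration of that try-each-seed-then-sniff flow, here is a minimal, self-contained sketch; the `SniffExample`, `Node` and `Sniffer` names are invented for the example and deliberately avoid the real Elasticsearch types.

```java
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.function.Predicate;
import java.util.function.Supplier;

/** Illustrative stand-ins only; not the real Elasticsearch classes. */
public class SniffExample {

    public static final class Node {
        final String id;
        final String address;
        Node(String id, String address) { this.id = id; this.address = address; }
    }

    /** Pretend handshake + cluster-state fetch; a real implementation performs network I/O. */
    public interface Sniffer {
        List<Node> fetchRemoteNodes(Node seed) throws Exception;
    }

    private final Sniffer sniffer;
    private final Predicate<Node> nodePredicate;
    private final int maxRemoteConnections;

    public SniffExample(Sniffer sniffer, Predicate<Node> nodePredicate, int maxRemoteConnections) {
        this.sniffer = sniffer;
        this.nodePredicate = nodePredicate;
        this.maxRemoteConnections = maxRemoteConnections;
    }

    /**
     * Walk the seed suppliers one by one: the first seed that answers is used to discover the
     * remote nodes; if it fails, fall back to the next seed, mirroring the "try the next seed
     * on a connect or handshake failure" behaviour in the hunks above.
     */
    public List<Node> collectRemoteNodes(Iterator<Supplier<Node>> seedNodes) {
        Exception lastFailure = null;
        while (seedNodes.hasNext()) {
            Node seed = seedNodes.next().get();
            try {
                List<Node> accepted = new ArrayList<>();
                for (Node node : sniffer.fetchRemoteNodes(seed)) {
                    // only keep nodes that pass the predicate, capped at the configured maximum
                    if (nodePredicate.test(node) && accepted.size() < maxRemoteConnections) {
                        accepted.add(node);
                    }
                }
                return accepted;
            } catch (Exception e) {
                lastFailure = e; // remember the failure and try the next seed
            }
        }
        throw new IllegalStateException("no seed node of the remote cluster was reachable", lastFailure);
    }
}
```
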
*/ public RemoteConnectionInfo getConnectionInfo() { - List seedNodeAddresses = seedNodes.stream().map(DiscoveryNode::getAddress).collect(Collectors.toList()); + List seedNodeAddresses = seedNodes.stream().map(node -> node.get().getAddress()).collect + (Collectors.toList()); TimeValue initialConnectionTimeout = RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); return new RemoteConnectionInfo(clusterAlias, seedNodeAddresses, maxNumRemoteConnections, connectedNodes.size(), initialConnectionTimeout, skipUnavailable); @@ -714,4 +734,8 @@ private synchronized void ensureIteratorAvailable() { } } } + + ConnectionManager getConnectionManager() { + return connectionManager; + } } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index a07de63d53734..60126847cbea9 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.transport; +import java.util.Collection; +import java.util.function.Supplier; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; @@ -29,10 +31,10 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.core.internal.io.IOUtils; @@ -40,7 +42,6 @@ import java.io.Closeable; import java.io.IOException; -import java.net.InetSocketAddress; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -115,7 +116,8 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl * @param seeds a cluster alias to discovery node mapping representing the remote clusters seeds nodes * @param connectionListener a listener invoked once every configured cluster has been connected to */ - private synchronized void updateRemoteClusters(Map> seeds, ActionListener connectionListener) { + private synchronized void updateRemoteClusters(Map>>> seeds, + ActionListener connectionListener) { if (seeds.containsKey(LOCAL_CLUSTER_GROUP_KEY)) { throw new IllegalArgumentException("remote clusters must not have the empty string as its key"); } @@ -125,9 +127,12 @@ private synchronized void updateRemoteClusters(Map> } else { CountDown countDown = new CountDown(seeds.size()); remoteClusters.putAll(this.remoteClusters); - for (Map.Entry> entry : seeds.entrySet()) { + for (Map.Entry>>> entry : seeds.entrySet()) { + List> seedList = entry.getValue().v2(); + String proxyAddress = entry.getValue().v1(); + RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey()); - if (entry.getValue().isEmpty()) { // with no seed nodes we just remove the connection + if (seedList.isEmpty()) { // with no seed nodes we just remove the connection try { IOUtils.close(remote); } catch (IOException e) { @@ -138,14 +143,15 @@ private synchronized void updateRemoteClusters(Map> } if (remote == null) { // this is a new cluster we have to add 
a new representation - remote = new RemoteClusterConnection(settings, entry.getKey(), entry.getValue(), transportService, numRemoteConnections, - getNodePredicate(settings)); + remote = new RemoteClusterConnection(settings, entry.getKey(), seedList, transportService, + new ConnectionManager(settings, transportService.transport, transportService.threadPool), numRemoteConnections, + getNodePredicate(settings), proxyAddress); remoteClusters.put(entry.getKey(), remote); } // now update the seed nodes no matter if it's new or already existing RemoteClusterConnection finalRemote = remote; - remote.updateSeedNodes(entry.getValue(), ActionListener.wrap( + remote.updateSeedNodes(proxyAddress, seedList, ActionListener.wrap( response -> { if (countDown.countDown()) { connectionListener.onResponse(response); @@ -299,8 +305,7 @@ protected Set getRemoteClusterNames() { @Override public void listenForUpdates(ClusterSettings clusterSettings) { super.listenForUpdates(clusterSettings); - clusterSettings.addAffixUpdateConsumer(REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, - (clusterAlias, value) -> {}); + clusterSettings.addAffixUpdateConsumer(REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {}); } synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavailable) { @@ -310,21 +315,21 @@ synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavail } } - protected void updateRemoteCluster(String clusterAlias, List addresses) { - updateRemoteCluster(clusterAlias, addresses, ActionListener.wrap((x) -> {}, (x) -> {})); + + @Override + protected void updateRemoteCluster(String clusterAlias, List addresses, String proxyAddress) { + updateRemoteCluster(clusterAlias, addresses, proxyAddress, ActionListener.wrap((x) -> {}, (x) -> {})); } void updateRemoteCluster( final String clusterAlias, - final List addresses, + final List addresses, + final String proxyAddress, final ActionListener connectionListener) { - final List nodes = addresses.stream().map(address -> { - final TransportAddress transportAddress = new TransportAddress(address); - final String id = clusterAlias + "#" + transportAddress.toString(); - final Version version = Version.CURRENT.minimumCompatibilityVersion(); - return new DiscoveryNode(id, transportAddress, version); - }).collect(Collectors.toList()); - updateRemoteClusters(Collections.singletonMap(clusterAlias, nodes), connectionListener); + final List> nodes = addresses.stream().>map(address -> () -> + buildSeedNode(clusterAlias, address, Strings.hasLength(proxyAddress)) + ).collect(Collectors.toList()); + updateRemoteClusters(Collections.singletonMap(clusterAlias, new Tuple<>(proxyAddress, nodes)), connectionListener); } /** @@ -334,7 +339,7 @@ void updateRemoteCluster( void initializeRemoteClusters() { final TimeValue timeValue = REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); final PlainActionFuture future = new PlainActionFuture<>(); - Map> seeds = RemoteClusterAware.buildRemoteClustersSeeds(settings); + Map>>> seeds = RemoteClusterAware.buildRemoteClustersDynamicConfig(settings); updateRemoteClusters(seeds, future); try { future.get(timeValue.millis(), TimeUnit.MILLISECONDS); @@ -409,4 +414,8 @@ public Client getRemoteClusterClient(ThreadPool threadPool, String clusterAlias) } return new RemoteClusterAwareClient(settings, threadPool, transportService, clusterAlias); } + + Collection getConnections() { + return remoteClusters.values(); + } } diff --git 
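
In `updateRemoteClusters` above, each cluster alias maps to a `Tuple` of an optional proxy address and a list of seed-node suppliers: an empty seed list removes the existing connection, a new alias gets its own `RemoteClusterConnection` backed by its own `ConnectionManager`, and a `CountDown` fires the listener once every alias has been processed. The following is a minimal sketch of that registry shape; `ClusterConfig`, `RemoteHandle` and the `AtomicInteger` standing in for Elasticsearch's `CountDown` helper are simplifications for the example, not the actual classes.

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;

/** Minimal sketch; ClusterConfig and RemoteHandle are illustrative stand-ins, not the real classes. */
public class RemoteRegistryExample {

    /** Per-alias configuration: an optional proxy address plus lazily resolved seed addresses. */
    public static final class ClusterConfig {
        final String proxyAddress;                    // may be null when no proxy is configured
        final List<Supplier<String>> seedAddresses;
        public ClusterConfig(String proxyAddress, List<Supplier<String>> seedAddresses) {
            this.proxyAddress = proxyAddress;
            this.seedAddresses = seedAddresses;
        }
    }

    /** Stand-in for a per-cluster connection handle. */
    public static final class RemoteHandle implements AutoCloseable {
        void updateSeeds(ClusterConfig config, Runnable onDone) {
            // a real implementation would (re)connect asynchronously and invoke onDone when finished
            onDone.run();
        }
        @Override public void close() { /* tear down the underlying connections */ }
    }

    private final Map<String, RemoteHandle> remotes = new HashMap<>();

    /**
     * Apply a new per-alias configuration: an empty seed list removes the connection, an unknown
     * alias creates a new handle, and onAllConnected runs once every alias has been processed,
     * mirroring the CountDown-based completion handling in the hunk above.
     */
    public synchronized void updateRemoteClusters(Map<String, ClusterConfig> configByAlias, Runnable onAllConnected) {
        if (configByAlias.isEmpty()) {
            onAllConnected.run();
            return;
        }
        AtomicInteger pending = new AtomicInteger(configByAlias.size());
        Runnable countDown = () -> {
            if (pending.decrementAndGet() == 0) {
                onAllConnected.run();
            }
        };
        for (Map.Entry<String, ClusterConfig> entry : configByAlias.entrySet()) {
            String alias = entry.getKey();
            ClusterConfig config = entry.getValue();
            RemoteHandle handle = remotes.get(alias);
            if (config.seedAddresses.isEmpty()) {     // no seeds left: drop the connection entirely
                if (handle != null) {
                    handle.close();
                    remotes.remove(alias);
                }
                countDown.run();
                continue;
            }
            if (handle == null) {                     // new alias: create a fresh handle
                handle = new RemoteHandle();
                remotes.put(alias, handle);
            }
            handle.updateSeeds(config, countDown);
        }
    }
}
```
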
a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 0b82417cfaa04..d71e459fccdf0 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -184,7 +184,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements protected final NetworkService networkService; protected final Set profileSettings; - private final DelegatingTransportConnectionListener transportListener = new DelegatingTransportConnectionListener(); + private final DelegatingTransportMessageListener messageListener = new DelegatingTransportMessageListener(); private final ConcurrentMap profileBoundAddresses = newConcurrentMap(); private final Map> serverChannels = newConcurrentMap(); @@ -248,14 +248,12 @@ public TcpTransport(String transportName, Settings settings, ThreadPool threadPo protected void doStart() { } - @Override - public void addConnectionListener(TransportConnectionListener listener) { - transportListener.listeners.add(listener); + public void addMessageListener(TransportMessageListener listener) { + messageListener.listeners.add(listener); } - @Override - public boolean removeConnectionListener(TransportConnectionListener listener) { - return transportListener.listeners.remove(listener); + public boolean removeMessageListener(TransportMessageListener listener) { + return messageListener.listeners.remove(listener); } @Override @@ -344,10 +342,6 @@ public TcpChannel channel(TransportRequestOptions.Type type) { return connectionTypeHandle.getChannel(channels); } - boolean allChannelsOpen() { - return channels.stream().allMatch(TcpChannel::isOpen); - } - @Override public boolean sendPing() { for (TcpChannel channel : channels) { @@ -447,7 +441,7 @@ public NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connect try { PlainActionFuture connectFuture = PlainActionFuture.newFuture(); connectionFutures.add(connectFuture); - TcpChannel channel = initiateChannel(node.getAddress().address(), connectFuture); + TcpChannel channel = initiateChannel(node, connectFuture); logger.trace(() -> new ParameterizedMessage("Tcp transport client channel opened: {}", channel)); channels.add(channel); } catch (Exception e) { @@ -481,11 +475,6 @@ public NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connect // underlying channels. nodeChannels = new NodeChannels(node, channels, connectionProfile, version); final NodeChannels finalNodeChannels = nodeChannels; - try { - transportListener.onConnectionOpened(nodeChannels); - } finally { - nodeChannels.addCloseListener(ActionListener.wrap(() -> transportListener.onConnectionClosed(finalNodeChannels))); - } Consumer onClose = c -> { assert c.isOpen() == false : "channel is still open when onClose is called"; @@ -493,10 +482,6 @@ public NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connect }; nodeChannels.channels.forEach(ch -> ch.addCloseListener(ActionListener.wrap(() -> onClose.accept(ch)))); - - if (nodeChannels.allChannelsOpen() == false) { - throw new ConnectTransportException(node, "a channel closed while connecting"); - } success = true; return nodeChannels; } catch (ConnectTransportException e) { @@ -856,12 +841,12 @@ protected void serverAcceptedChannel(TcpChannel channel) { /** * Initiate a single tcp socket channel. 
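
In the `TcpTransport` hunks above, request and response notifications now flow through the renamed `messageListener`, and `onRequestSent`/`onResponseSent` are only fired from a send listener after the serialized message has been passed to the underlying network layer. Below is a small sketch of that notify-after-write ordering; `Channel` and `MessageListener` here are invented stand-ins using `CompletableFuture`, not the real transport types.

```java
import java.nio.charset.StandardCharsets;
import java.util.concurrent.CompletableFuture;

/** Illustrative stand-ins; not the actual TcpTransport types. */
public class NotifyOnSendExample {

    /** Message-level callback, analogous to onRequestSent in the listener interface in this change. */
    public interface MessageListener {
        void onRequestSent(long requestId, String action, int messageLength);
    }

    /** A channel whose write completes asynchronously. */
    public interface Channel {
        CompletableFuture<Void> write(byte[] message);
    }

    private final Channel channel;
    private final MessageListener listener;

    public NotifyOnSendExample(Channel channel, MessageListener listener) {
        this.channel = channel;
        this.listener = listener;
    }

    /**
     * Serialize the request, hand it to the channel, and only notify the listener once the
     * write has actually been handed off, preserving the ordering the send listener enforces.
     */
    public CompletableFuture<Void> sendRequest(long requestId, String action, String payload) {
        byte[] message = (action + ":" + payload).getBytes(StandardCharsets.UTF_8);
        return channel.write(message)
            .thenRun(() -> listener.onRequestSent(requestId, action, message.length));
    }
}
```
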
* - * @param address address for the initiated connection + * @param node for the initiated connection * @param connectListener listener to be called when connection complete * @return the pending connection * @throws IOException if an I/O exception occurs while opening the channel */ - protected abstract TcpChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException; + protected abstract TcpChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException; /** * Called to tear down internal resources @@ -907,7 +892,7 @@ private void sendRequestToChannel(final DiscoveryNode node, final TcpChannel cha final TransportRequestOptions finalOptions = options; // this might be called in a different thread SendListener onRequestSent = new SendListener(channel, stream, - () -> transportListener.onRequestSent(node, requestId, action, request, finalOptions), message.length()); + () -> messageListener.onRequestSent(node, requestId, action, request, finalOptions), message.length()); internalSendMessage(channel, message, onRequestSent); addedReleaseListener = true; } finally { @@ -961,7 +946,7 @@ public void sendErrorResponse( final BytesReference header = buildHeader(requestId, status, nodeVersion, bytes.length()); CompositeBytesReference message = new CompositeBytesReference(header, bytes); SendListener onResponseSent = new SendListener(channel, null, - () -> transportListener.onResponseSent(requestId, action, error), message.length()); + () -> messageListener.onResponseSent(requestId, action, error), message.length()); internalSendMessage(channel, message, onResponseSent); } } @@ -1010,7 +995,7 @@ private void sendResponse( final TransportResponseOptions finalOptions = options; // this might be called in a different thread SendListener listener = new SendListener(channel, stream, - () -> transportListener.onResponseSent(requestId, action, response, finalOptions), message.length()); + () -> messageListener.onResponseSent(requestId, action, response, finalOptions), message.length()); internalSendMessage(channel, message, listener); addedReleaseListener = true; } finally { @@ -1266,7 +1251,7 @@ public final void messageReceived(BytesReference reference, TcpChannel channel) if (isHandshake) { handler = pendingHandshakes.remove(requestId); } else { - TransportResponseHandler theHandler = responseHandlers.onResponseReceived(requestId, transportListener); + TransportResponseHandler theHandler = responseHandlers.onResponseReceived(requestId, messageListener); if (theHandler == null && TransportStatus.isError(status)) { handler = pendingHandshakes.remove(requestId); } else { @@ -1373,7 +1358,7 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str features = Collections.emptySet(); } final String action = stream.readString(); - transportListener.onRequestReceived(requestId, action); + messageListener.onRequestReceived(requestId, action); TransportChannel transportChannel = null; try { if (TransportStatus.isHandshake(status)) { @@ -1682,26 +1667,27 @@ public ProfileSettings(Settings settings, String profileName) { } } - private static final class DelegatingTransportConnectionListener implements TransportConnectionListener { - private final List listeners = new CopyOnWriteArrayList<>(); + private static final class DelegatingTransportMessageListener implements TransportMessageListener { + + private final List listeners = new CopyOnWriteArrayList<>(); @Override public void onRequestReceived(long requestId, String 
action) { - for (TransportConnectionListener listener : listeners) { + for (TransportMessageListener listener : listeners) { listener.onRequestReceived(requestId, action); } } @Override public void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions finalOptions) { - for (TransportConnectionListener listener : listeners) { + for (TransportMessageListener listener : listeners) { listener.onResponseSent(requestId, action, response, finalOptions); } } @Override public void onResponseSent(long requestId, String action, Exception error) { - for (TransportConnectionListener listener : listeners) { + for (TransportMessageListener listener : listeners) { listener.onResponseSent(requestId, action, error); } } @@ -1709,42 +1695,14 @@ public void onResponseSent(long requestId, String action, Exception error) { @Override public void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions finalOptions) { - for (TransportConnectionListener listener : listeners) { + for (TransportMessageListener listener : listeners) { listener.onRequestSent(node, requestId, action, request, finalOptions); } } - @Override - public void onNodeDisconnected(DiscoveryNode key) { - for (TransportConnectionListener listener : listeners) { - listener.onNodeDisconnected(key); - } - } - - @Override - public void onConnectionOpened(Connection nodeChannels) { - for (TransportConnectionListener listener : listeners) { - listener.onConnectionOpened(nodeChannels); - } - } - - @Override - public void onNodeConnected(DiscoveryNode node) { - for (TransportConnectionListener listener : listeners) { - listener.onNodeConnected(node); - } - } - - @Override - public void onConnectionClosed(Connection nodeChannels) { - for (TransportConnectionListener listener : listeners) { - listener.onConnectionClosed(nodeChannels); - } - } - @Override public void onResponseReceived(long requestId, ResponseContext holder) { - for (TransportConnectionListener listener : listeners) { + for (TransportMessageListener listener : listeners) { listener.onResponseReceived(requestId, holder); } } diff --git a/server/src/main/java/org/elasticsearch/transport/Transport.java b/server/src/main/java/org/elasticsearch/transport/Transport.java index 9538119f43b8a..90adf2ab9e7d4 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transport.java +++ b/server/src/main/java/org/elasticsearch/transport/Transport.java @@ -56,18 +56,9 @@ public interface Transport extends LifecycleComponent { */ RequestHandlerRegistry getRequestHandler(String action); - /** - * Adds a new event listener - * @param listener the listener to add - */ - void addConnectionListener(TransportConnectionListener listener); + void addMessageListener(TransportMessageListener listener); - /** - * Removes an event listener - * @param listener the listener to remove - * @return true iff the listener was removed otherwise false - */ - boolean removeConnectionListener(TransportConnectionListener listener); + boolean removeMessageListener(TransportMessageListener listener); /** * The address the transport is bound on. @@ -254,7 +245,7 @@ public List prune(Predicate predicate) { * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not * found. 
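
The `DelegatingTransportMessageListener` in the hunk above is a plain composite: registered listeners live in a `CopyOnWriteArrayList` and every callback is fanned out to all of them in registration order. Here is a self-contained sketch of the same pattern with a trimmed-down listener interface; the `MessageEvents`/`DelegatingMessageEvents` names are illustrative only.

```java
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

/** Simplified listener interface; default no-op methods let implementers pick what they need. */
interface MessageEvents {
    default void onRequestReceived(long requestId, String action) {}
    default void onResponseSent(long requestId, String action) {}
}

/**
 * Composite listener: registration is cheap and iteration is safe against concurrent
 * add/remove because the backing list is copy-on-write, as in the delegating listener above.
 */
final class DelegatingMessageEvents implements MessageEvents {

    private final List<MessageEvents> listeners = new CopyOnWriteArrayList<>();

    void addListener(MessageEvents listener) {
        listeners.add(listener);
    }

    boolean removeListener(MessageEvents listener) {
        return listeners.remove(listener);
    }

    @Override
    public void onRequestReceived(long requestId, String action) {
        for (MessageEvents listener : listeners) {
            listener.onRequestReceived(requestId, action);
        }
    }

    @Override
    public void onResponseSent(long requestId, String action) {
        for (MessageEvents listener : listeners) {
            listener.onResponseSent(requestId, action);
        }
    }
}
```
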
*/ - public TransportResponseHandler onResponseReceived(final long requestId, TransportConnectionListener listener) { + public TransportResponseHandler onResponseReceived(final long requestId, TransportMessageListener listener) { ResponseContext context = handlers.remove(requestId); listener.onResponseReceived(requestId, context); if (context == null) { diff --git a/server/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java b/server/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java index 0ee2ed5828d44..c41a328637c22 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java @@ -28,42 +28,6 @@ */ public interface TransportConnectionListener { - /** - * Called once a request is received - * @param requestId the internal request ID - * @param action the request action - * - */ - default void onRequestReceived(long requestId, String action) {} - - /** - * Called for every action response sent after the response has been passed to the underlying network implementation. - * @param requestId the request ID (unique per client) - * @param action the request action - * @param response the response send - * @param finalOptions the response options - */ - default void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions finalOptions) {} - - /*** - * Called for every failed action response after the response has been passed to the underlying network implementation. - * @param requestId the request ID (unique per client) - * @param action the request action - * @param error the error sent back to the caller - */ - default void onResponseSent(long requestId, String action, Exception error) {} - - /** - * Called for every request sent to a server after the request has been passed to the underlying network implementation - * @param node the node the request was sent to - * @param requestId the internal request id - * @param action the action name - * @param request the actual request - * @param finalOptions the request options - */ - default void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, - TransportRequestOptions finalOptions) {} - /** * Called once a connection was opened * @param connection the connection @@ -76,13 +40,6 @@ default void onConnectionOpened(Transport.Connection connection) {} */ default void onConnectionClosed(Transport.Connection connection) {} - /** - * Called for every response received - * @param requestId the request id for this reponse - * @param context the response context or null if the context was already processed ie. due to a timeout. - */ - default void onResponseReceived(long requestId, Transport.ResponseContext context) {} - /** * Called once a node connection is opened and registered. */ diff --git a/server/src/main/java/org/elasticsearch/transport/TransportMessageListener.java b/server/src/main/java/org/elasticsearch/transport/TransportMessageListener.java new file mode 100644 index 0000000000000..a872c761b36d0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/TransportMessageListener.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import org.elasticsearch.cluster.node.DiscoveryNode; + +public interface TransportMessageListener { + + /** + * Called once a request is received + * @param requestId the internal request ID + * @param action the request action + * + */ + default void onRequestReceived(long requestId, String action) {} + + /** + * Called for every action response sent after the response has been passed to the underlying network implementation. + * @param requestId the request ID (unique per client) + * @param action the request action + * @param response the response send + * @param finalOptions the response options + */ + default void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions finalOptions) {} + + /*** + * Called for every failed action response after the response has been passed to the underlying network implementation. + * @param requestId the request ID (unique per client) + * @param action the request action + * @param error the error sent back to the caller + */ + default void onResponseSent(long requestId, String action, Exception error) {} + + /** + * Called for every request sent to a server after the request has been passed to the underlying network implementation + * @param node the node the request was sent to + * @param requestId the internal request id + * @param action the action name + * @param request the actual request + * @param finalOptions the request options + */ + default void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, + TransportRequestOptions finalOptions) {} + + /** + * Called for every response received + * @param requestId the request id for this reponse + * @param context the response context or null if the context was already processed ie. due to a timeout. 
+ */ + default void onResponseReceived(long requestId, Transport.ResponseContext context) {} +} diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 8eca6504b70be..e37ea81211ad7 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -28,6 +28,7 @@ import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -56,6 +57,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.UnknownHostException; import java.util.Arrays; import java.util.Collections; @@ -77,7 +79,7 @@ import static org.elasticsearch.common.settings.Setting.listSetting; import static org.elasticsearch.common.settings.Setting.timeSetting; -public class TransportService extends AbstractLifecycleComponent implements TransportConnectionListener { +public class TransportService extends AbstractLifecycleComponent implements TransportMessageListener, TransportConnectionListener { public static final Setting CONNECTIONS_PER_NODE_RECOVERY = intSetting("transport.connections_per_node.recovery", 2, 1, Setting.Property.NodeScope); @@ -248,7 +250,8 @@ void setTracerLogExclude(List tracerLogExclude) { @Override protected void doStart() { - transport.addConnectionListener(this); + transport.addMessageListener(this); + connectionManager.addListener(this); transport.start(); if (transport.boundAddress() != null && logger.isInfoEnabled()) { logger.info("{}", transport.boundAddress()); @@ -267,8 +270,9 @@ protected void doStart() { @Override protected void doStop() { try { - connectionManager.close(); - transport.stop(); + IOUtils.close(connectionManager, remoteClusterService, transport::stop); + } catch (IOException e) { + throw new UncheckedIOException(e); } finally { // in case the transport is not connected to our local node (thus cleaned on node disconnect) // make sure to clean any leftover on going handles @@ -305,7 +309,7 @@ public void doRun() { @Override protected void doClose() throws IOException { - IOUtils.close(remoteClusterService, transport); + transport.close(); } /** @@ -363,14 +367,18 @@ public void connectToNode(final DiscoveryNode node, ConnectionProfile connection if (isLocalNode(node)) { return; } + connectionManager.connectToNode(node, connectionProfile, connectionValidator(node)); + } - connectionManager.connectToNode(node, connectionProfile, (newConnection, actualProfile) -> { + public CheckedBiConsumer connectionValidator(DiscoveryNode node) { + return (newConnection, actualProfile) -> { // We don't validate cluster names to allow for CCS connections. final DiscoveryNode remote = handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true).discoveryNode; if (validateConnections && node.equals(remote) == false) { throw new ConnectTransportException(node, "handshake failed. 
unexpected remote node " + remote); } - }); + }; + } /** @@ -506,12 +514,10 @@ public void disconnectFromNode(DiscoveryNode node) { } public void addConnectionListener(TransportConnectionListener listener) { - transport.addConnectionListener(listener); connectionManager.addListener(listener); } public void removeConnectionListener(TransportConnectionListener listener) { - transport.removeConnectionListener(listener); connectionManager.removeListener(listener); } @@ -563,8 +569,12 @@ public final void sendRequest(final Transport.Conn final TransportRequest request, final TransportRequestOptions options, TransportResponseHandler handler) { - - asyncSender.sendRequest(connection, action, request, options, handler); + try { + asyncSender.sendRequest(connection, action, request, options, handler); + } catch (NodeNotConnectedException ex) { + // the caller might not handle this so we invoke the handler + handler.handleException(ex); + } } /** diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 1f62eb706a84b..5c8c25cbfddfe 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -41,8 +41,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.breaker.CircuitBreakingException; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -104,7 +102,6 @@ import java.nio.file.Path; import java.nio.file.attribute.BasicFileAttributes; import java.util.Arrays; -import java.util.Base64; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -116,7 +113,6 @@ import static java.util.Collections.emptySet; import static java.util.Collections.singleton; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.instanceOf; public class ExceptionSerializationTests extends ESTestCase { @@ -872,89 +868,12 @@ public void testElasticsearchRemoteException() throws IOException { public void testShardLockObtainFailedException() throws IOException { ShardId shardId = new ShardId("foo", "_na_", 1); ShardLockObtainFailedException orig = new ShardLockObtainFailedException(shardId, "boom"); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); - if (version.before(Version.V_5_0_2)) { - version = Version.V_5_0_2; - } + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); ShardLockObtainFailedException ex = serialize(orig, version); assertEquals(orig.getMessage(), ex.getMessage()); assertEquals(orig.getShardId(), ex.getShardId()); } - public void testBWCShardLockObtainFailedException() throws IOException { - ShardId shardId = new ShardId("foo", "_na_", 1); - ShardLockObtainFailedException orig = new ShardLockObtainFailedException(shardId, "boom"); - Exception ex = serialize((Exception)orig, randomFrom(Version.V_5_0_0, Version.V_5_0_1)); - assertThat(ex, instanceOf(NotSerializableExceptionWrapper.class)); - assertEquals("shard_lock_obtain_failed_exception: [foo][1]: boom", ex.getMessage()); - } - - public void testBWCHeadersAndMetadata() throws IOException { - //this is a request serialized with headers 
only, no metadata as they were added in 5.3.0 - BytesReference decoded = new BytesArray(Base64.getDecoder().decode - ("AQ10ZXN0ICBtZXNzYWdlACYtb3JnLmVsYXN0aWNzZWFyY2guRXhjZXB0aW9uU2VyaWFsaXphdGlvblRlc3RzASBFeGNlcHRpb25TZXJpYWxpemF0aW9uVG" + - "VzdHMuamF2YQR0ZXN03wYkc3VuLnJlZmxlY3QuTmF0aXZlTWV0aG9kQWNjZXNzb3JJbXBsAR1OYXRpdmVNZXRob2RBY2Nlc3NvckltcGwuamF2Y" + - "QdpbnZva2Uw/v///w8kc3VuLnJlZmxlY3QuTmF0aXZlTWV0aG9kQWNjZXNzb3JJbXBsAR1OYXRpdmVNZXRob2RBY2Nlc3NvckltcGwuamF2YQZp" + - "bnZva2U+KHN1bi5yZWZsZWN0LkRlbGVnYXRpbmdNZXRob2RBY2Nlc3NvckltcGwBIURlbGVnYXRpbmdNZXRob2RBY2Nlc3NvckltcGwuamF2YQZ" + - "pbnZva2UrGGphdmEubGFuZy5yZWZsZWN0Lk1ldGhvZAELTWV0aG9kLmphdmEGaW52b2tl8QMzY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdG" + - "VzdGluZy5SYW5kb21pemVkUnVubmVyARVSYW5kb21pemVkUnVubmVyLmphdmEGaW52b2tlsQ01Y29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkd" + - "GVzdGluZy5SYW5kb21pemVkUnVubmVyJDgBFVJhbmRvbWl6ZWRSdW5uZXIuamF2YQhldmFsdWF0ZYsHNWNvbS5jYXJyb3RzZWFyY2gucmFuZG9t" + - "aXplZHRlc3RpbmcuUmFuZG9taXplZFJ1bm5lciQ5ARVSYW5kb21pemVkUnVubmVyLmphdmEIZXZhbHVhdGWvBzZjb20uY2Fycm90c2VhcmNoLnJ" + - "hbmRvbWl6ZWR0ZXN0aW5nLlJhbmRvbWl6ZWRSdW5uZXIkMTABFVJhbmRvbWl6ZWRSdW5uZXIuamF2YQhldmFsdWF0Zb0HOWNvbS5jYXJyb3RzZW" + - "FyY2gucmFuZG9taXplZHRlc3RpbmcucnVsZXMuU3RhdGVtZW50QWRhcHRlcgEVU3RhdGVtZW50QWRhcHRlci5qYXZhCGV2YWx1YXRlJDVvcmcuY" + - "XBhY2hlLmx1Y2VuZS51dGlsLlRlc3RSdWxlU2V0dXBUZWFyZG93bkNoYWluZWQkMQEhVGVzdFJ1bGVTZXR1cFRlYXJkb3duQ2hhaW5lZC5qYXZh" + - "CGV2YWx1YXRlMTBvcmcuYXBhY2hlLmx1Y2VuZS51dGlsLkFic3RyYWN0QmVmb3JlQWZ0ZXJSdWxlJDEBHEFic3RyYWN0QmVmb3JlQWZ0ZXJSdWx" + - "lLmphdmEIZXZhbHVhdGUtMm9yZy5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVUaHJlYWRBbmRUZXN0TmFtZSQxAR5UZXN0UnVsZVRocmVhZE" + - "FuZFRlc3ROYW1lLmphdmEIZXZhbHVhdGUwN29yZy5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVJZ25vcmVBZnRlck1heEZhaWx1cmVzJDEBI" + - "1Rlc3RSdWxlSWdub3JlQWZ0ZXJNYXhGYWlsdXJlcy5qYXZhCGV2YWx1YXRlQCxvcmcuYXBhY2hlLmx1Y2VuZS51dGlsLlRlc3RSdWxlTWFya0Zh" + - "aWx1cmUkMQEYVGVzdFJ1bGVNYXJrRmFpbHVyZS5qYXZhCGV2YWx1YXRlLzljb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGV" + - "zLlN0YXRlbWVudEFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSREY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdG" + - "luZy5UaHJlYWRMZWFrQ29udHJvbCRTdGF0ZW1lbnRSdW5uZXIBFlRocmVhZExlYWtDb250cm9sLmphdmEDcnVu7wI0Y29tLmNhcnJvdHNlYXJja" + - "C5yYW5kb21pemVkdGVzdGluZy5UaHJlYWRMZWFrQ29udHJvbAEWVGhyZWFkTGVha0NvbnRyb2wuamF2YRJmb3JrVGltZW91dGluZ1Rhc2urBjZj" + - "b20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLlRocmVhZExlYWtDb250cm9sJDMBFlRocmVhZExlYWtDb250cm9sLmphdmEIZXZhbHV" + - "hdGXOAzNjb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLlJhbmRvbWl6ZWRSdW5uZXIBFVJhbmRvbWl6ZWRSdW5uZXIuamF2YQ1ydW" + - "5TaW5nbGVUZXN0lAc1Y29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdGluZy5SYW5kb21pemVkUnVubmVyJDUBFVJhbmRvbWl6ZWRSdW5uZ" + - "XIuamF2YQhldmFsdWF0ZaIGNWNvbS5jYXJyb3RzZWFyY2gucmFuZG9taXplZHRlc3RpbmcuUmFuZG9taXplZFJ1bm5lciQ2ARVSYW5kb21pemVk" + - "UnVubmVyLmphdmEIZXZhbHVhdGXUBjVjb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLlJhbmRvbWl6ZWRSdW5uZXIkNwEVUmFuZG9" + - "taXplZFJ1bm5lci5qYXZhCGV2YWx1YXRl3wYwb3JnLmFwYWNoZS5sdWNlbmUudXRpbC5BYnN0cmFjdEJlZm9yZUFmdGVyUnVsZSQxARxBYnN0cm" + - "FjdEJlZm9yZUFmdGVyUnVsZS5qYXZhCGV2YWx1YXRlLTljb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLlN0YXRlbWVud" + - "EFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSQvb3JnLmFwYWNoZS5sdWNlbmUudXRpbC5UZXN0UnVsZVN0b3JlQ2xhc3NO" + - "YW1lJDEBG1Rlc3RSdWxlU3RvcmVDbGFzc05hbWUuamF2YQhldmFsdWF0ZSlOY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdGluZy5ydWx" + - "lcy5Ob1NoYWRvd2luZ09yT3ZlcnJpZGVzT25NZXRob2RzUnVsZSQxAShOb1NoYWRvd2luZ09yT3ZlcnJpZGVzT25NZXRob2RzUnVsZS5qYXZhCG" + - 
"V2YWx1YXRlKE5jb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLk5vU2hhZG93aW5nT3JPdmVycmlkZXNPbk1ldGhvZHNSd" + - "WxlJDEBKE5vU2hhZG93aW5nT3JPdmVycmlkZXNPbk1ldGhvZHNSdWxlLmphdmEIZXZhbHVhdGUoOWNvbS5jYXJyb3RzZWFyY2gucmFuZG9taXpl" + - "ZHRlc3RpbmcucnVsZXMuU3RhdGVtZW50QWRhcHRlcgEVU3RhdGVtZW50QWRhcHRlci5qYXZhCGV2YWx1YXRlJDljb20uY2Fycm90c2VhcmNoLnJ" + - "hbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLlN0YXRlbWVudEFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSQ5Y29tLmNhcnJvdH" + - "NlYXJjaC5yYW5kb21pemVkdGVzdGluZy5ydWxlcy5TdGF0ZW1lbnRBZGFwdGVyARVTdGF0ZW1lbnRBZGFwdGVyLmphdmEIZXZhbHVhdGUkM29yZ" + - "y5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVBc3NlcnRpb25zUmVxdWlyZWQkMQEfVGVzdFJ1bGVBc3NlcnRpb25zUmVxdWlyZWQuamF2YQhl" + - "dmFsdWF0ZTUsb3JnLmFwYWNoZS5sdWNlbmUudXRpbC5UZXN0UnVsZU1hcmtGYWlsdXJlJDEBGFRlc3RSdWxlTWFya0ZhaWx1cmUuamF2YQhldmF" + - "sdWF0ZS83b3JnLmFwYWNoZS5sdWNlbmUudXRpbC5UZXN0UnVsZUlnbm9yZUFmdGVyTWF4RmFpbHVyZXMkMQEjVGVzdFJ1bGVJZ25vcmVBZnRlck" + - "1heEZhaWx1cmVzLmphdmEIZXZhbHVhdGVAMW9yZy5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVJZ25vcmVUZXN0U3VpdGVzJDEBHVRlc3RSd" + - "WxlSWdub3JlVGVzdFN1aXRlcy5qYXZhCGV2YWx1YXRlNjljb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLlN0YXRlbWVu" + - "dEFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSREY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdGluZy5UaHJlYWR" + - "MZWFrQ29udHJvbCRTdGF0ZW1lbnRSdW5uZXIBFlRocmVhZExlYWtDb250cm9sLmphdmEDcnVu7wIQamF2YS5sYW5nLlRocmVhZAELVGhyZWFkLm" + - "phdmEDcnVu6QUABAdoZWFkZXIyAQZ2YWx1ZTIKZXMuaGVhZGVyMwEGdmFsdWUzB2hlYWRlcjEBBnZhbHVlMQplcy5oZWFkZXI0AQZ2YWx1ZTQAA" + - "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + - "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + - "AAAAA")); - - try (StreamInput in = decoded.streamInput()) { - //randomize the version across released and unreleased ones - Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - in.setVersion(version); - ElasticsearchException exception = new ElasticsearchException(in); - assertEquals("test message", exception.getMessage()); - //the headers received as part of a single set get split based on their prefix - assertEquals(2, exception.getHeaderKeys().size()); - assertEquals("value1", exception.getHeader("header1").get(0)); - assertEquals("value2", exception.getHeader("header2").get(0)); - assertEquals(2, exception.getMetadataKeys().size()); - assertEquals("value3", exception.getMetadata("es.header3").get(0)); - assertEquals("value4", exception.getMetadata("es.header4").get(0)); - } - } - private static class UnknownException extends Exception { UnknownException(final String message, final Exception cause) { super(message, cause); diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index 74303bfb6d851..4c7dc9eb094b7 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -36,8 +36,8 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.Version.V_5_3_0; -import static org.elasticsearch.Version.V_6_0_0_beta1; +import static org.elasticsearch.Version.V_6_3_0; +import static org.elasticsearch.Version.V_7_0_0_alpha1; import static org.elasticsearch.test.VersionUtils.allVersions; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; @@ -50,30 
+50,30 @@ public class VersionTests extends ESTestCase { public void testVersionComparison() throws Exception { - assertThat(V_5_3_0.before(V_6_0_0_beta1), is(true)); - assertThat(V_5_3_0.before(V_5_3_0), is(false)); - assertThat(V_6_0_0_beta1.before(V_5_3_0), is(false)); + assertThat(V_6_3_0.before(V_7_0_0_alpha1), is(true)); + assertThat(V_6_3_0.before(V_6_3_0), is(false)); + assertThat(V_7_0_0_alpha1.before(V_6_3_0), is(false)); - assertThat(V_5_3_0.onOrBefore(V_6_0_0_beta1), is(true)); - assertThat(V_5_3_0.onOrBefore(V_5_3_0), is(true)); - assertThat(V_6_0_0_beta1.onOrBefore(V_5_3_0), is(false)); + assertThat(V_6_3_0.onOrBefore(V_7_0_0_alpha1), is(true)); + assertThat(V_6_3_0.onOrBefore(V_6_3_0), is(true)); + assertThat(V_7_0_0_alpha1.onOrBefore(V_6_3_0), is(false)); - assertThat(V_5_3_0.after(V_6_0_0_beta1), is(false)); - assertThat(V_5_3_0.after(V_5_3_0), is(false)); - assertThat(V_6_0_0_beta1.after(V_5_3_0), is(true)); + assertThat(V_6_3_0.after(V_7_0_0_alpha1), is(false)); + assertThat(V_6_3_0.after(V_6_3_0), is(false)); + assertThat(V_7_0_0_alpha1.after(V_6_3_0), is(true)); - assertThat(V_5_3_0.onOrAfter(V_6_0_0_beta1), is(false)); - assertThat(V_5_3_0.onOrAfter(V_5_3_0), is(true)); - assertThat(V_6_0_0_beta1.onOrAfter(V_5_3_0), is(true)); + assertThat(V_6_3_0.onOrAfter(V_7_0_0_alpha1), is(false)); + assertThat(V_6_3_0.onOrAfter(V_6_3_0), is(true)); + assertThat(V_7_0_0_alpha1.onOrAfter(V_6_3_0), is(true)); assertTrue(Version.fromString("5.0.0-alpha2").onOrAfter(Version.fromString("5.0.0-alpha1"))); assertTrue(Version.fromString("5.0.0").onOrAfter(Version.fromString("5.0.0-beta2"))); assertTrue(Version.fromString("5.0.0-rc1").onOrAfter(Version.fromString("5.0.0-beta24"))); assertTrue(Version.fromString("5.0.0-alpha24").before(Version.fromString("5.0.0-beta0"))); - assertThat(V_5_3_0, is(lessThan(V_6_0_0_beta1))); - assertThat(V_5_3_0.compareTo(V_5_3_0), is(0)); - assertThat(V_6_0_0_beta1, is(greaterThan(V_5_3_0))); + assertThat(V_6_3_0, is(lessThan(V_7_0_0_alpha1))); + assertThat(V_6_3_0.compareTo(V_6_3_0), is(0)); + assertThat(V_7_0_0_alpha1, is(greaterThan(V_6_3_0))); } public void testMin() { @@ -101,12 +101,12 @@ public void testMax() { } public void testMinimumIndexCompatibilityVersion() { - assertEquals(Version.V_5_0_0, Version.V_6_0_0_beta1.minimumIndexCompatibilityVersion()); - assertEquals(Version.fromId(2000099), Version.V_5_0_0.minimumIndexCompatibilityVersion()); + assertEquals(Version.fromId(5000099), Version.V_6_0_0_beta1.minimumIndexCompatibilityVersion()); + assertEquals(Version.fromId(2000099), Version.fromId(5000099).minimumIndexCompatibilityVersion()); assertEquals(Version.fromId(2000099), - Version.V_5_1_1.minimumIndexCompatibilityVersion()); + Version.fromId(5010000).minimumIndexCompatibilityVersion()); assertEquals(Version.fromId(2000099), - Version.V_5_0_0_alpha1.minimumIndexCompatibilityVersion()); + Version.fromId(5000001).minimumIndexCompatibilityVersion()); } public void testVersionConstantPresent() { @@ -160,31 +160,38 @@ public void testVersionNoPresentInSettings() { public void testIndexCreatedVersion() { // an actual index has a IndexMetaData.SETTING_INDEX_UUID - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_2, - Version.V_5_2_0, Version.V_6_0_0_beta1); + final Version version = Version.V_6_0_0_beta1; assertEquals(version, Version.indexCreated(Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "foo").put(IndexMetaData.SETTING_VERSION_CREATED, version).build())); } public void testMinCompatVersion() { - Version prerelease = 
VersionUtils.getFirstVersion(); - assertThat(prerelease.minimumCompatibilityVersion(), equalTo(prerelease)); Version major = Version.fromString("2.0.0"); assertThat(Version.fromString("2.0.0").minimumCompatibilityVersion(), equalTo(major)); assertThat(Version.fromString("2.2.0").minimumCompatibilityVersion(), equalTo(major)); assertThat(Version.fromString("2.3.0").minimumCompatibilityVersion(), equalTo(major)); - // from 6.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is + + Version major5x = Version.fromString("5.0.0"); + assertThat(Version.fromString("5.0.0").minimumCompatibilityVersion(), equalTo(major5x)); + assertThat(Version.fromString("5.2.0").minimumCompatibilityVersion(), equalTo(major5x)); + assertThat(Version.fromString("5.3.0").minimumCompatibilityVersion(), equalTo(major5x)); + + Version major56x = Version.fromString("5.6.0"); + assertThat(Version.V_6_5_0.minimumCompatibilityVersion(), equalTo(major56x)); + assertThat(Version.V_6_3_1.minimumCompatibilityVersion(), equalTo(major56x)); + + // from 7.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is // released since we need to bump the supported minor in Version#minimumCompatibilityVersion() - Version lastVersion = Version.V_5_6_0; // TODO: remove this once min compat version is a constant instead of method - assertEquals(lastVersion.major, Version.V_6_0_0_beta1.minimumCompatibilityVersion().major); + Version lastVersion = Version.V_6_5_0; // TODO: remove this once min compat version is a constant instead of method + assertEquals(lastVersion.major, Version.V_7_0_0_alpha1.minimumCompatibilityVersion().major); assertEquals("did you miss to bump the minor in Version#minimumCompatibilityVersion()", - lastVersion.minor, Version.V_6_0_0_beta1.minimumCompatibilityVersion().minor); - assertEquals(0, Version.V_6_0_0_beta1.minimumCompatibilityVersion().revision); + lastVersion.minor, Version.V_7_0_0_alpha1.minimumCompatibilityVersion().minor); + assertEquals(0, Version.V_7_0_0_alpha1.minimumCompatibilityVersion().revision); } public void testToString() { // with 2.0.beta we lowercase assertEquals("2.0.0-beta1", Version.fromString("2.0.0-beta1").toString()); - assertEquals("5.0.0-alpha1", Version.V_5_0_0_alpha1.toString()); + assertEquals("5.0.0-alpha1", Version.fromId(5000001).toString()); assertEquals("2.3.0", Version.fromString("2.3.0").toString()); assertEquals("0.90.0.Beta1", Version.fromString("0.90.0.Beta1").toString()); assertEquals("1.0.0.Beta1", Version.fromString("1.0.0.Beta1").toString()); @@ -334,11 +341,11 @@ public static void assertUnknownVersion(Version version) { public void testIsCompatible() { assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); - assertTrue(isCompatible(Version.V_5_6_0, Version.V_6_0_0_alpha2)); - assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_0_0_alpha2)); - assertFalse(isCompatible(Version.fromId(2000099), Version.V_5_0_0)); - assertFalse(isCompatible(Version.fromString("6.0.0"), Version.fromString("7.0.0"))); - assertFalse(isCompatible(Version.fromString("6.0.0-alpha1"), Version.fromString("7.0.0"))); + assertTrue(isCompatible(Version.V_6_5_0, Version.V_7_0_0_alpha1)); + assertFalse(isCompatible(Version.fromId(2000099), Version.V_7_0_0_alpha1)); + assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_5_0)); + assertFalse(isCompatible(Version.fromString("7.0.0"), Version.fromString("8.0.0"))); + 
assertFalse(isCompatible(Version.fromString("7.0.0-alpha1"), Version.fromString("8.0.0"))); final Version currentMajorVersion = Version.fromId(Version.CURRENT.major * 1000000 + 99); final Version currentOrNextMajorVersion; @@ -373,8 +380,8 @@ public void testIsCompatible() { isCompatible(VersionUtils.getPreviousMinorVersion(), currentOrNextMajorVersion), equalTo(isCompatible)); - assertFalse(isCompatible(Version.V_5_0_0, Version.fromString("6.0.0"))); - assertFalse(isCompatible(Version.V_5_0_0, Version.fromString("7.0.0"))); + assertFalse(isCompatible(Version.fromId(5000099), Version.fromString("6.0.0"))); + assertFalse(isCompatible(Version.fromId(5000099), Version.fromString("7.0.0"))); Version a = randomVersion(random()); Version b = randomVersion(random()); diff --git a/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java b/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java index 7952758240544..3f9e258ffec1c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java @@ -20,11 +20,9 @@ package org.elasticsearch.action.admin; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureSettings; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; @@ -44,11 +42,11 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.containsString; public class ReloadSecureSettingsIT extends ESIntegTestCase { @@ -62,7 +60,7 @@ public void testMissingKeystoreFile() throws Exception { Files.deleteIfExists(KeyStoreWrapper.keystorePath(environment.configFile())); final int initialReloadCount = mockReloadablePlugin.getReloadCount(); final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( + client().admin().cluster().prepareReloadSecureSettings().execute( new ActionListener() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { @@ -96,44 +94,6 @@ public void onFailure(Exception e) { assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); } - public void testNullKeystorePassword() throws Exception { - final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); - final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) - .stream().findFirst().get(); - final AtomicReference reloadSettingsError = new AtomicReference<>(); - final int initialReloadCount = mockReloadablePlugin.getReloadCount(); - final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().execute( - new ActionListener() { 
- @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - reloadSettingsError.set(new AssertionError("Null keystore password should fail")); - } finally { - latch.countDown(); - } - } - - @Override - public void onFailure(Exception e) { - try { - assertThat(e, instanceOf(ActionRequestValidationException.class)); - assertThat(e.getMessage(), containsString("secure settings password cannot be null")); - } catch (final AssertionError ae) { - reloadSettingsError.set(ae); - } finally { - latch.countDown(); - } - } - }); - latch.await(); - if (reloadSettingsError.get() != null) { - throw reloadSettingsError.get(); - } - // in the null password case no reload should be triggered - assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); - } - public void testInvalidKeystoreFile() throws Exception { final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) @@ -149,7 +109,7 @@ public void testInvalidKeystoreFile() throws Exception { Files.copy(keystore, KeyStoreWrapper.keystorePath(environment.configFile()), StandardCopyOption.REPLACE_EXISTING); } final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( + client().admin().cluster().prepareReloadSecureSettings().execute( new ActionListener() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { @@ -181,52 +141,6 @@ public void onFailure(Exception e) { assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); } - public void testWrongKeystorePassword() throws Exception { - final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); - final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) - .stream().findFirst().get(); - final Environment environment = internalCluster().getInstance(Environment.class); - final AtomicReference reloadSettingsError = new AtomicReference<>(); - final int initialReloadCount = mockReloadablePlugin.getReloadCount(); - // "some" keystore should be present in this case - writeEmptyKeystore(environment, new char[0]); - final CountDownLatch latch = new CountDownLatch(1); - client().admin() - .cluster() - .prepareReloadSecureSettings() - .setSecureStorePassword(new SecureString(new char[] { 'W', 'r', 'o', 'n', 'g' })) - .execute(new ActionListener() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), notNullValue()); - assertThat(nodeResponse.reloadException(), instanceOf(SecurityException.class)); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); - } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); - latch.countDown(); - } - }); - latch.await(); - if (reloadSettingsError.get() != null) { - throw reloadSettingsError.get(); - } - // in the wrong password case no reload 
should be triggered - assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); - } - public void testMisbehavingPlugin() throws Exception { final Environment environment = internalCluster().getInstance(Environment.class); final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); @@ -247,7 +161,7 @@ public void testMisbehavingPlugin() throws Exception { .get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build()) .toString(); final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( + client().admin().cluster().prepareReloadSecureSettings().execute( new ActionListener() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { @@ -314,7 +228,7 @@ protected Collection> nodePlugins() { private void successfulReloadCall() throws InterruptedException { final AtomicReference reloadSettingsError = new AtomicReference<>(); final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( + client().admin().cluster().prepareReloadSecureSettings().execute( new ActionListener() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index 7bf43b828c05a..3384efcf836c6 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -49,7 +49,6 @@ import java.util.List; import java.util.Map; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomLongBetween; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java index 232259948fb2f..5f5fe54321bbb 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java @@ -54,7 +54,7 @@ public void testSerialization() throws Exception { request.routing(routings); } - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); try (BytesStreamOutput out = new BytesStreamOutput()) { out.setVersion(version); request.writeTo(out); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java index 90eb7cdcfd46a..f685be02141ad 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java @@ -77,7 +77,7 @@ public void testSerialization() throws Exception { List entries = new 
ArrayList<>(); entries.addAll(searchModule.getNamedWriteables()); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); try(BytesStreamOutput out = new BytesStreamOutput()) { out.setVersion(version); clusterSearchShardsResponse.writeTo(out); @@ -93,11 +93,7 @@ public void testSerialization() throws Exception { assertEquals(clusterSearchShardsGroup.getShardId(), deserializedGroup.getShardId()); assertArrayEquals(clusterSearchShardsGroup.getShards(), deserializedGroup.getShards()); } - if (version.onOrAfter(Version.V_5_1_1)) { - assertEquals(clusterSearchShardsResponse.getIndicesAndFilters(), deserialized.getIndicesAndFilters()); - } else { - assertNull(deserialized.getIndicesAndFilters()); - } + assertEquals(clusterSearchShardsResponse.getIndicesAndFilters(), deserialized.getIndicesAndFilters()); } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java index 0cb0063727fe7..c0685d5d17d29 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java @@ -19,10 +19,7 @@ package org.elasticsearch.action.admin.indices.create; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.AbstractStreamableXContentTestCase; @@ -67,25 +64,6 @@ protected CreateIndexResponse doParseInstance(XContentParser parser) { return CreateIndexResponse.fromXContent(parser); } - public void testSerializationWithOldVersion() throws IOException { - Version oldVersion = Version.V_5_4_0; - CreateIndexResponse response = new CreateIndexResponse(true, true, "foo"); - - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(oldVersion); - response.writeTo(output); - - try (StreamInput in = output.bytes().streamInput()) { - in.setVersion(oldVersion); - CreateIndexResponse serialized = new CreateIndexResponse(); - serialized.readFrom(in); - assertEquals(response.isShardsAcknowledged(), serialized.isShardsAcknowledged()); - assertEquals(response.isAcknowledged(), serialized.isAcknowledged()); - assertNull(serialized.index()); - } - } - } - public void testToXContent() { CreateIndexResponse response = new CreateIndexResponse(true, false, "index_name"); String output = Strings.toString(response); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java index 86c2b67be9c54..5243ffd33b39c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java @@ -19,20 +19,14 @@ package org.elasticsearch.action.admin.indices.mapping.put; -import org.elasticsearch.Version; import 
org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.common.xcontent.yaml.YamlXContent; import org.elasticsearch.index.Index; import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.test.ESTestCase; @@ -87,27 +81,6 @@ public void testBuildFromSimplifiedDef() { assertEquals("mapping source must be pairs of fieldnames and properties definition.", e.getMessage()); } - public void testPutMappingRequestSerialization() throws IOException { - PutMappingRequest request = new PutMappingRequest("foo"); - String mapping = Strings.toString(YamlXContent.contentBuilder().startObject().field("foo", "bar").endObject()); - request.source(mapping, XContentType.YAML); - assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), request.source()); - - final Version version = randomFrom(Version.CURRENT, Version.V_5_3_0, Version.V_5_3_1, Version.V_5_3_2, Version.V_5_4_0); - try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { - bytesStreamOutput.setVersion(version); - request.writeTo(bytesStreamOutput); - try (StreamInput in = StreamInput.wrap(bytesStreamOutput.bytes().toBytesRef().bytes)) { - in.setVersion(version); - PutMappingRequest serialized = new PutMappingRequest(); - serialized.readFrom(in); - - String source = serialized.source(); - assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), source); - } - } - } - public void testToXContent() throws IOException { PutMappingRequest request = new PutMappingRequest("foo"); request.type("my_type"); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java index f0e9a57f7f3e6..39f04c6b7b092 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java @@ -69,7 +69,7 @@ public void testIndexTemplateInvalidNumberOfShards() { containsString("Failed to parse value [0] for setting [index.number_of_shards] must be >= 1")); assertThat(throwables.get(0).getMessage(), containsString("unknown value for [index.shard.check_on_startup] " + - "must be one of [true, false, fix, checksum] but was: blargh")); + "must be one of [true, false, checksum] but was: blargh")); } public void testIndexTemplateValidationAccumulatesValidationErrors() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java index c21e6b3c225f0..2d037d7c024d5 100644 --- 
a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java @@ -18,25 +18,16 @@ */ package org.elasticsearch.action.admin.indices.template.put; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.common.xcontent.yaml.YamlXContent; import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Arrays; -import java.util.Base64; import java.util.Collections; import static org.hamcrest.Matchers.containsString; @@ -46,81 +37,6 @@ import static org.hamcrest.core.Is.is; public class PutIndexTemplateRequestTests extends AbstractXContentTestCase { - - // bwc for #21009 - public void testPutIndexTemplateRequest510() throws IOException { - PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest("test"); - putRequest.patterns(Collections.singletonList("test*")); - putRequest.order(5); - - PutIndexTemplateRequest multiPatternRequest = new PutIndexTemplateRequest("test"); - multiPatternRequest.patterns(Arrays.asList("test*", "*test2", "*test3*")); - multiPatternRequest.order(5); - - // These bytes were retrieved by Base64 encoding the result of the above with 5_0_0 code. - // Note: Instead of a list for the template, in 5_0_0 the element was provided as a string. - String putRequestBytes = "ADwDAAR0ZXN0BXRlc3QqAAAABQAAAAAAAA=="; - BytesArray bytes = new BytesArray(Base64.getDecoder().decode(putRequestBytes)); - - try (StreamInput in = bytes.streamInput()) { - in.setVersion(Version.V_5_0_0); - PutIndexTemplateRequest readRequest = new PutIndexTemplateRequest(); - readRequest.readFrom(in); - assertEquals(putRequest.patterns(), readRequest.patterns()); - assertEquals(putRequest.order(), readRequest.order()); - - BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(Version.V_5_0_0); - readRequest.writeTo(output); - assertEquals(bytes.toBytesRef(), output.bytes().toBytesRef()); - - // test that multi templates are reverse-compatible. - // for the bwc case, if multiple patterns, use only the first pattern seen. 
- output.reset(); - multiPatternRequest.writeTo(output); - assertEquals(bytes.toBytesRef(), output.bytes().toBytesRef()); - } - } - - public void testPutIndexTemplateRequestSerializationXContent() throws IOException { - PutIndexTemplateRequest request = new PutIndexTemplateRequest("foo"); - String mapping = Strings.toString(YamlXContent.contentBuilder().startObject().field("foo", "bar").endObject()); - request.patterns(Collections.singletonList("foo")); - request.mapping("bar", mapping, XContentType.YAML); - assertNotEquals(mapping, request.mappings().get("bar")); - assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), request.mappings().get("bar")); - - final Version version = randomFrom(Version.CURRENT, Version.V_5_3_0, Version.V_5_3_1, Version.V_5_3_2, Version.V_5_4_0); - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(version); - request.writeTo(out); - - try (StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes)) { - in.setVersion(version); - PutIndexTemplateRequest serialized = new PutIndexTemplateRequest(); - serialized.readFrom(in); - assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), - serialized.mappings().get("bar")); - } - } - } - - public void testPutIndexTemplateRequestSerializationXContentBwc() throws IOException { - final byte[] data = Base64.getDecoder().decode("ADwDAANmb28IdGVtcGxhdGUAAAAAAAABA2Jhcg8tLS0KZm9vOiAiYmFyIgoAAAAAAAAAAAAAAAA="); - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - PutIndexTemplateRequest request = new PutIndexTemplateRequest(); - request.readFrom(in); - String mapping = Strings.toString(YamlXContent.contentBuilder().startObject().field("foo", "bar").endObject()); - assertNotEquals(mapping, request.mappings().get("bar")); - assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), request.mappings().get("bar")); - assertEquals("foo", request.name()); - assertEquals("template", request.patterns().get(0)); - } - } - public void testValidateErrorMessage() throws Exception { PutIndexTemplateRequest request = new PutIndexTemplateRequest(); ActionRequestValidationException withoutNameAndPattern = request.validate(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 4c0dacc8a6e73..8b68d2b6bb9bd 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.ingest.IngestService; -import org.elasticsearch.ingest.PipelineExecutionService; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -90,9 +89,6 @@ public class TransportBulkActionIngestTests extends ESTestCase { ClusterService clusterService; IngestService ingestService; - /** The ingest execution service we can capture calls to */ - PipelineExecutionService executionService; - /** Arguments to callbacks we want to capture, but which require generics, so we must use @Captor */ @Captor 
ArgumentCaptor> failureHandler; @@ -207,8 +203,6 @@ public void setupAction() { }).when(clusterService).addStateApplier(any(ClusterStateApplier.class)); // setup the mocked ingest service for capturing calls ingestService = mock(IngestService.class); - executionService = mock(PipelineExecutionService.class); - when(ingestService.getPipelineExecutionService()).thenReturn(executionService); action = new TestTransportBulkAction(); singleItemBulkWriteAction = new TestSingleItemBulkWriteAction(action); reset(transportService); // call on construction of action @@ -265,7 +259,7 @@ public void testIngestLocal() throws Exception { assertFalse(action.isExecuted); // haven't executed yet assertFalse(responseCalled.get()); assertFalse(failureCalled.get()); - verify(executionService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); + verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); completionHandler.getValue().accept(exception); assertTrue(failureCalled.get()); @@ -299,7 +293,7 @@ public void testSingleItemBulkActionIngestLocal() throws Exception { assertFalse(action.isExecuted); // haven't executed yet assertFalse(responseCalled.get()); assertFalse(failureCalled.get()); - verify(executionService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); + verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); completionHandler.getValue().accept(exception); assertTrue(failureCalled.get()); @@ -331,7 +325,7 @@ public void testIngestForward() throws Exception { action.execute(null, bulkRequest, listener); // should not have executed ingest locally - verify(executionService, never()).executeBulkRequest(any(), any(), any()); + verify(ingestService, never()).executeBulkRequest(any(), any(), any()); // but instead should have sent to a remote node with the transport service ArgumentCaptor node = ArgumentCaptor.forClass(DiscoveryNode.class); verify(transportService).sendRequest(node.capture(), eq(BulkAction.NAME), any(), remoteResponseHandler.capture()); @@ -375,7 +369,7 @@ public void testSingleItemBulkActionIngestForward() throws Exception { singleItemBulkWriteAction.execute(null, indexRequest, listener); // should not have executed ingest locally - verify(executionService, never()).executeBulkRequest(any(), any(), any()); + verify(ingestService, never()).executeBulkRequest(any(), any(), any()); // but instead should have sent to a remote node with the transport service ArgumentCaptor node = ArgumentCaptor.forClass(DiscoveryNode.class); verify(transportService).sendRequest(node.capture(), eq(BulkAction.NAME), any(), remoteResponseHandler.capture()); @@ -423,7 +417,7 @@ public void testUseDefaultPipeline() throws Exception { assertFalse(action.isExecuted); // haven't executed yet assertFalse(responseCalled.get()); assertFalse(failureCalled.get()); - verify(executionService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); + verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); completionHandler.getValue().accept(exception); assertTrue(failureCalled.get()); @@ -455,7 +449,7 @@ public void testCreateIndexBeforeRunPipeline() throws Exception { assertFalse(action.isExecuted); // haven't executed yet assertFalse(responseCalled.get()); assertFalse(failureCalled.get()); - 
verify(executionService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); + verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); completionHandler.getValue().accept(exception); assertTrue(failureCalled.get()); diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java index b38240632421a..90b730660ddd9 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java @@ -28,11 +28,15 @@ import org.elasticsearch.test.AbstractStreamableXContentTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.function.Predicate; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength; + public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTestCase<FieldCapabilitiesResponse> { @@ -48,22 +52,46 @@ protected FieldCapabilitiesResponse createBlankInstance() { @Override protected FieldCapabilitiesResponse createTestInstance() { - Map<String, Map<String, FieldCapabilities>> responses = new HashMap<>(); + if (randomBoolean()) { + // merged responses + Map<String, Map<String, FieldCapabilities>> responses = new HashMap<>(); + + String[] fields = generateRandomStringArray(5, 10, false, true); + assertNotNull(fields); + + for (String field : fields) { + Map<String, FieldCapabilities> typesToCapabilities = new HashMap<>(); + String[] types = generateRandomStringArray(5, 10, false, false); + assertNotNull(types); + + for (String type : types) { + typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field)); + } + responses.put(field, typesToCapabilities); + } + return new FieldCapabilitiesResponse(responses); + } else { + // non-merged responses + List<FieldCapabilitiesIndexResponse> responses = new ArrayList<>(); + int numResponse = randomIntBetween(0, 10); + for (int i = 0; i < numResponse; i++) { + responses.add(createRandomIndexResponse()); + } + return new FieldCapabilitiesResponse(responses); + } + } + + + private FieldCapabilitiesIndexResponse createRandomIndexResponse() { + Map<String, FieldCapabilities> responses = new HashMap<>(); String[] fields = generateRandomStringArray(5, 10, false, true); assertNotNull(fields); for (String field : fields) { - Map<String, FieldCapabilities> typesToCapabilities = new HashMap<>(); - String[] types = generateRandomStringArray(5, 10, false, false); - assertNotNull(types); - - for (String type : types) { - typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field)); - } - responses.put(field, typesToCapabilities); + responses.put(field, FieldCapabilitiesTests.randomFieldCaps(field)); } - return new FieldCapabilitiesResponse(responses); + return new FieldCapabilitiesIndexResponse(randomAsciiLettersOfLength(10), responses); } @Override @@ -138,6 +166,11 @@ public void testToXContent() throws IOException { "}").replaceAll("\\s+", ""), generatedResponse); } + public void testEmptyResponse() throws IOException { + FieldCapabilitiesResponse testInstance = new FieldCapabilitiesResponse(); + assertSerialization(testInstance); + } + private static FieldCapabilitiesResponse createSimpleResponse() { Map<String, FieldCapabilities> titleCapabilities = new HashMap<>(); titleCapabilities.put("text", new FieldCapabilities("title", "text", true, false)); diff --git 
a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java index b0c6d717bb38e..1711d16891083 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java @@ -31,8 +31,8 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.ingest.CompoundProcessor; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.Pipeline; -import org.elasticsearch.ingest.PipelineStore; import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.TestProcessor; import org.elasticsearch.test.ESTestCase; @@ -53,7 +53,7 @@ public class SimulatePipelineRequestParsingTests extends ESTestCase { - private PipelineStore store; + private IngestService ingestService; @Before public void init() throws IOException { @@ -62,9 +62,9 @@ public void init() throws IOException { Pipeline pipeline = new Pipeline(SIMULATED_PIPELINE_ID, null, null, pipelineCompoundProcessor); Map registry = Collections.singletonMap("mock_processor", (factories, tag, config) -> processor); - store = mock(PipelineStore.class); - when(store.get(SIMULATED_PIPELINE_ID)).thenReturn(pipeline); - when(store.getProcessorFactories()).thenReturn(registry); + ingestService = mock(IngestService.class); + when(ingestService.getPipeline(SIMULATED_PIPELINE_ID)).thenReturn(pipeline); + when(ingestService.getProcessorFactories()).thenReturn(registry); } public void testParseUsingPipelineStore() throws Exception { @@ -94,7 +94,8 @@ public void testParseUsingPipelineStore() throws Exception { expectedDocs.add(expectedDoc); } - SimulatePipelineRequest.Parsed actualRequest = SimulatePipelineRequest.parseWithPipelineId(SIMULATED_PIPELINE_ID, requestContent, false, store); + SimulatePipelineRequest.Parsed actualRequest = + SimulatePipelineRequest.parseWithPipelineId(SIMULATED_PIPELINE_ID, requestContent, false, ingestService); assertThat(actualRequest.isVerbose(), equalTo(false)); assertThat(actualRequest.getDocuments().size(), equalTo(numDocs)); Iterator> expectedDocsIterator = expectedDocs.iterator(); @@ -182,7 +183,7 @@ public void testParseWithProvidedPipeline() throws Exception { requestContent.put(Fields.PIPELINE, pipelineConfig); - SimulatePipelineRequest.Parsed actualRequest = SimulatePipelineRequest.parse(requestContent, false, store); + SimulatePipelineRequest.Parsed actualRequest = SimulatePipelineRequest.parse(requestContent, false, ingestService); assertThat(actualRequest.isVerbose(), equalTo(false)); assertThat(actualRequest.getDocuments().size(), equalTo(numDocs)); Iterator> expectedDocsIterator = expectedDocs.iterator(); @@ -208,7 +209,7 @@ public void testNullPipelineId() { List> docs = new ArrayList<>(); requestContent.put(Fields.DOCS, docs); Exception e = expectThrows(IllegalArgumentException.class, - () -> SimulatePipelineRequest.parseWithPipelineId(null, requestContent, false, store)); + () -> SimulatePipelineRequest.parseWithPipelineId(null, requestContent, false, ingestService)); assertThat(e.getMessage(), equalTo("param [pipeline] is null")); } @@ -218,7 +219,7 @@ public void testNonExistentPipelineId() { List> docs = new ArrayList<>(); requestContent.put(Fields.DOCS, docs); Exception e = expectThrows(IllegalArgumentException.class, - () -> 
SimulatePipelineRequest.parseWithPipelineId(pipelineId, requestContent, false, store)); + () -> SimulatePipelineRequest.parseWithPipelineId(pipelineId, requestContent, false, ingestService)); assertThat(e.getMessage(), equalTo("pipeline [" + pipelineId + "] does not exist")); } } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java index 5cd82be8cb04c..53c307c430815 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -28,7 +27,6 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; -import java.util.Base64; import static org.hamcrest.CoreMatchers.equalTo; @@ -68,22 +66,4 @@ public void testSerializationWithXContent() throws IOException { assertEquals(XContentType.JSON, serialized.getXContentType()); assertEquals("{}", serialized.getSource().utf8ToString()); } - - public void testSerializationWithXContentBwc() throws IOException { - final byte[] data = Base64.getDecoder().decode("AAAAAnt9AAA="); - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - SimulatePipelineRequest request = new SimulatePipelineRequest(in); - assertEquals(XContentType.JSON, request.getXContentType()); - assertEquals("{}", request.getSource().utf8ToString()); - - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(version); - request.writeTo(out); - assertArrayEquals(data, out.bytes().toBytesRef().bytes); - } - } - } } diff --git a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 8b1741967734c..50bbad16ab73b 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -33,7 +33,6 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.transport.Transport; import java.io.IOException; @@ -110,17 +109,6 @@ public void run() throws IOException { } } - public void testOldNodesTriggerException() { - SearchTransportService searchTransportService = new SearchTransportService( - Settings.builder().put("search.remote.connect", false).build(), null, null); - DiscoveryNode node = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), VersionUtils.randomVersionBetween(random(), - VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion(Version.V_5_6_0))); - SearchAsyncActionTests.MockConnection mockConnection = new SearchAsyncActionTests.MockConnection(node); - IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, - () -> searchTransportService.sendCanMatch(mockConnection, null, 
null, null)); - assertEquals("can_match is not supported on pre 5.6 nodes", illegalArgumentException.getMessage()); - } - public void testFilterWithFailure() throws InterruptedException { final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, System.nanoTime(), System::nanoTime); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseExecutionExceptionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseExecutionExceptionTests.java index e96a0975fd46c..9fbf3704fff21 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseExecutionExceptionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseExecutionExceptionTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContent; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; @@ -38,8 +37,6 @@ import java.io.IOException; -import static java.util.Collections.singletonMap; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.Matchers.hasSize; @@ -87,56 +84,6 @@ public void testToXContent() throws IOException { "}" + "}" + "]}", Strings.toString(exception)); - - // Failures are NOT grouped - ToXContent.MapParams params = new ToXContent.MapParams(singletonMap("group_shard_failures", "false")); - try (XContentBuilder builder = jsonBuilder()) { - builder.startObject(); - exception.toXContent(builder, params); - builder.endObject(); - - assertEquals("{" + - "\"type\":\"search_phase_execution_exception\"," + - "\"reason\":\"all shards failed\"," + - "\"phase\":\"test\"," + - "\"grouped\":false," + - "\"failed_shards\":[" + - "{" + - "\"shard\":0," + - "\"index\":\"foo\"," + - "\"node\":\"node_1\"," + - "\"reason\":{" + - "\"type\":\"parsing_exception\"," + - "\"reason\":\"foobar\"," + - "\"line\":1," + - "\"col\":2" + - "}" + - "}," + - "{" + - "\"shard\":1," + - "\"index\":\"foo\"," + - "\"node\":\"node_2\"," + - "\"reason\":{" + - "\"type\":\"index_shard_closed_exception\"," + - "\"reason\":\"CurrentState[CLOSED] Closed\"," + - "\"index_uuid\":\"_na_\"," + - "\"shard\":\"1\"," + - "\"index\":\"foo\"" + - "}" + - "}," + - "{" + - "\"shard\":2," + - "\"index\":\"foo\"," + - "\"node\":\"node_3\"," + - "\"reason\":{" + - "\"type\":\"parsing_exception\"," + - "\"reason\":\"foobar\"," + - "\"line\":5," + - "\"col\":7" + - "}" + - "}" + - "]}", Strings.toString(builder)); - } } public void testToAndFromXContent() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java index 87e66477a0411..d6fbf59d941b9 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.search; -import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -47,13 +47,11 @@ import 
org.elasticsearch.search.suggest.SuggestTests; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalAggregationTestCase; -import org.elasticsearch.test.VersionUtils; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; -import java.util.Base64; import java.util.Collections; import java.util.List; @@ -183,7 +181,7 @@ public void testFromXContentWithFailures() throws IOException { int numFailures = randomIntBetween(1, 5); ShardSearchFailure[] failures = new ShardSearchFailure[numFailures]; for (int i = 0; i < failures.length; i++) { - failures[i] = ShardSearchFailureTests.createTestItem(); + failures[i] = ShardSearchFailureTests.createTestItem(IndexMetaData.INDEX_UUID_NA_VALUE); } SearchResponse response = createTestItem(failures); XContentType xcontentType = randomFrom(XContentType.values()); @@ -290,27 +288,4 @@ public void testSerialization() throws IOException { assertEquals(searchResponse.getClusters(), serialized.getClusters()); } } - - public void testSerializationBwc() throws IOException { - final byte[] data = Base64.getDecoder().decode("AAAAAAAAAAAAAgABBQUAAAoAAAAAAAAA"); - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_6_5, Version.V_6_0_0); - try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(data), namedWriteableRegistry)) { - in.setVersion(version); - SearchResponse deserialized = new SearchResponse(); - deserialized.readFrom(in); - assertSame(SearchResponse.Clusters.EMPTY, deserialized.getClusters()); - - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(version); - deserialized.writeTo(out); - try (StreamInput in2 = new NamedWriteableAwareStreamInput(StreamInput.wrap(out.bytes().toBytesRef().bytes), - namedWriteableRegistry)) { - in2.setVersion(version); - SearchResponse deserialized2 = new SearchResponse(); - deserialized2.readFrom(in2); - assertSame(SearchResponse.Clusters.EMPTY, deserialized2.getClusters()); - } - } - } - } } diff --git a/server/src/test/java/org/elasticsearch/action/search/ShardSearchFailureTests.java b/server/src/test/java/org/elasticsearch/action/search/ShardSearchFailureTests.java index bd892829c9548..f62f874c9e2c9 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ShardSearchFailureTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ShardSearchFailureTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; @@ -38,7 +39,7 @@ public class ShardSearchFailureTests extends ESTestCase { - public static ShardSearchFailure createTestItem() { + public static ShardSearchFailure createTestItem(String indexUuid) { String randomMessage = randomAlphaOfLengthBetween(3, 20); Exception ex = new ParsingException(0, 0, randomMessage , new IllegalArgumentException("some bad argument")); SearchShardTarget searchShardTarget = null; @@ -47,7 +48,7 @@ public static ShardSearchFailure createTestItem() { String indexName = randomAlphaOfLengthBetween(5, 10); String clusterAlias = randomBoolean() ? 
randomAlphaOfLengthBetween(5, 10) : null; searchShardTarget = new SearchShardTarget(nodeId, - new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), randomInt()), clusterAlias, OriginalIndices.NONE); + new ShardId(new Index(indexName, indexUuid), randomInt()), clusterAlias, OriginalIndices.NONE); } return new ShardSearchFailure(ex, searchShardTarget); } @@ -66,7 +67,7 @@ public void testFromXContentWithRandomFields() throws IOException { } private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { - ShardSearchFailure response = createTestItem(); + ShardSearchFailure response = createTestItem(IndexMetaData.INDEX_UUID_NA_VALUE); XContentType xContentType = randomFrom(XContentType.values()); boolean humanReadable = randomBoolean(); BytesReference originalBytes = toShuffledXContent(response, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); @@ -134,4 +135,15 @@ public void testToXContentWithClusterAlias() throws IOException { + "}", xContent.utf8ToString()); } + + public void testSerialization() throws IOException { + ShardSearchFailure testItem = createTestItem(randomAlphaOfLength(12)); + ShardSearchFailure deserializedInstance = copyStreamable(testItem, writableRegistry(), + ShardSearchFailure::new, VersionUtils.randomVersion(random())); + assertEquals(testItem.index(), deserializedInstance.index()); + assertEquals(testItem.shard(), deserializedInstance.shard()); + assertEquals(testItem.shardId(), deserializedInstance.shardId()); + assertEquals(testItem.reason(), deserializedInstance.reason()); + assertEquals(testItem.status(), deserializedInstance.status()); + } } diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/AbstractBroadcastResponseTestCase.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/AbstractBroadcastResponseTestCase.java index cec5e27f0763e..5bf48fa589764 100644 --- a/server/src/test/java/org/elasticsearch/action/support/broadcast/AbstractBroadcastResponseTestCase.java +++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/AbstractBroadcastResponseTestCase.java @@ -33,7 +33,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import static org.hamcrest.CoreMatchers.anyOf; @@ -130,29 +129,6 @@ public void testFailuresDeduplication() throws IOException { assertThat(parsedFailures[1].shardId(), equalTo(2)); assertThat(parsedFailures[1].status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); assertThat(parsedFailures[1].getCause().getMessage(), containsString("fizz")); - - ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("group_shard_failures", "false")); - BytesReference bytesReferenceWithoutDedup = toShuffledXContent(response, xContentType, params, humanReadable); - try(XContentParser parser = createParser(xContentType.xContent(), bytesReferenceWithoutDedup)) { - parsedResponse = doParseInstance(parser); - assertNull(parser.nextToken()); - } - - assertThat(parsedResponse.getShardFailures().length, equalTo(3)); - parsedFailures = parsedResponse.getShardFailures(); - for (int i = 0; i < 3; i++) { - if (i < 2) { - assertThat(parsedFailures[i].index(), equalTo("test")); - assertThat(parsedFailures[i].shardId(), equalTo(i)); - assertThat(parsedFailures[i].status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); - assertThat(parsedFailures[i].getCause().getMessage(), containsString("foo")); - } else { - assertThat(parsedFailures[i].index(), equalTo("test")); - 
assertThat(parsedFailures[i].shardId(), equalTo(i)); - assertThat(parsedFailures[i].status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); - assertThat(parsedFailures[i].getCause().getMessage(), containsString("fizz")); - } - } } public void testToXContent() { diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java index a16a8f628f98b..216c1802956e8 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java @@ -36,14 +36,11 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; -import org.elasticsearch.Version; import org.elasticsearch.action.termvectors.TermVectorsRequest.Flag; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -60,7 +57,6 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.Arrays; -import java.util.Base64; import java.util.EnumSet; import java.util.HashSet; import java.util.Set; @@ -264,34 +260,6 @@ public void testStreamRequest() throws IOException { } } - public void testStreamRequestWithXContentBwc() throws IOException { - final byte[] data = Base64.getDecoder().decode("AAABBWluZGV4BHR5cGUCaWQBAnt9AAABDnNvbWVQcmVmZXJlbmNlFgAAAAEA//////////0AAAA="); - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - TermVectorsRequest request = new TermVectorsRequest(); - request.readFrom(in); - assertEquals("index", request.index()); - assertEquals("type", request.type()); - assertEquals("id", request.id()); - assertTrue(request.offsets()); - assertFalse(request.fieldStatistics()); - assertTrue(request.payloads()); - assertFalse(request.positions()); - assertTrue(request.termStatistics()); - assertEquals("somePreference", request.preference()); - assertEquals("{}", request.doc().utf8ToString()); - assertEquals(XContentType.JSON, request.xContentType()); - - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(version); - request.writeTo(out); - assertArrayEquals(data, out.bytes().toBytesRef().bytes); - } - } - } - public void testFieldTypeToTermVectorString() throws Exception { FieldType ft = new FieldType(); ft.setStoreTermVectorOffsets(false); diff --git a/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java b/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java index 8180dd96e8e18..1e18135f4eb72 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java @@ -52,7 +52,7 @@ public class BootstrapChecksTests extends ESTestCase { - private static final BootstrapContext defaultContext = new BootstrapContext(Settings.EMPTY, 
MetaData.EMPTY_META_DATA); + static final BootstrapContext defaultContext = new BootstrapContext(Settings.EMPTY, MetaData.EMPTY_META_DATA); public void testNonProductionMode() throws NodeValidationException { // nothing should happen since we are in non-production mode @@ -356,31 +356,6 @@ long getRlimInfinity() { BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)); } - public void testMaxMapCountCheck() throws NodeValidationException { - final int limit = 1 << 18; - final AtomicLong maxMapCount = new AtomicLong(randomIntBetween(1, limit - 1)); - final BootstrapChecks.MaxMapCountCheck check = new BootstrapChecks.MaxMapCountCheck() { - @Override - long getMaxMapCount() { - return maxMapCount.get(); - } - }; - - final NodeValidationException e = expectThrows( - NodeValidationException.class, - () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check))); - assertThat(e.getMessage(), containsString("max virtual memory areas vm.max_map_count")); - - maxMapCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE)); - - BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)); - - // nothing should happen if current vm.max_map_count is not - // available - maxMapCount.set(-1); - BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)); - } - public void testClientJvmCheck() throws NodeValidationException { final AtomicReference vmName = new AtomicReference<>("Java HotSpot(TM) 32-Bit Client VM"); final BootstrapCheck check = new BootstrapChecks.ClientJvmCheck() { diff --git a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java index c5b99a91ffa3b..9a964a97bd73c 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java @@ -24,16 +24,21 @@ import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.Constants; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import java.io.BufferedReader; import java.io.IOException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Predicate; import static org.hamcrest.CoreMatchers.equalTo; @@ -45,6 +50,66 @@ public class MaxMapCountCheckTests extends ESTestCase { + // initialize as if the max map count is under the limit, tests can override by setting maxMapCount before executing the check + private final AtomicLong maxMapCount = new AtomicLong(randomIntBetween(1, Math.toIntExact(BootstrapChecks.MaxMapCountCheck.LIMIT) - 1)); + private final BootstrapChecks.MaxMapCountCheck check = new BootstrapChecks.MaxMapCountCheck() { + @Override + long getMaxMapCount() { + return maxMapCount.get(); + } + }; + + private void assertFailure(final BootstrapCheck.BootstrapCheckResult result) { + assertTrue(result.isFailure()); + assertThat( + result.getMessage(), + equalTo( + "max virtual memory areas vm.max_map_count [" + maxMapCount.get() + "] is too low, " + + "increase to at least 
[" + BootstrapChecks.MaxMapCountCheck.LIMIT + "]")); + } + + public void testMaxMapCountCheckBelowLimit() { + assertFailure(check.check(BootstrapChecksTests.defaultContext)); + } + + public void testMaxMapCountCheckBelowLimitAndMemoryMapAllowed() { + /* + * There are two ways that memory maps are allowed: + * - by default + * - mmapfs is explicitly allowed + * We want to test that if mmapfs is allowed then the max map count check is enforced. + */ + final List settingsThatAllowMemoryMap = new ArrayList<>(); + settingsThatAllowMemoryMap.add(Settings.EMPTY); + settingsThatAllowMemoryMap.add(Settings.builder().put("node.store.allow_mmapfs", true).build()); + + for (final Settings settingThatAllowsMemoryMap : settingsThatAllowMemoryMap) { + assertFailure(check.check(new BootstrapContext(settingThatAllowsMemoryMap, MetaData.EMPTY_META_DATA))); + } + } + + public void testMaxMapCountCheckNotEnforcedIfMemoryMapNotAllowed() { + // nothing should happen if current vm.max_map_count is under the limit but mmapfs is not allowed + final Settings settings = Settings.builder().put("node.store.allow_mmapfs", false).build(); + final BootstrapContext context = new BootstrapContext(settings, MetaData.EMPTY_META_DATA); + final BootstrapCheck.BootstrapCheckResult result = check.check(context); + assertTrue(result.isSuccess()); + } + + public void testMaxMapCountCheckAboveLimit() { + // nothing should happen if current vm.max_map_count exceeds the limit + maxMapCount.set(randomIntBetween(Math.toIntExact(BootstrapChecks.MaxMapCountCheck.LIMIT) + 1, Integer.MAX_VALUE)); + final BootstrapCheck.BootstrapCheckResult result = check.check(BootstrapChecksTests.defaultContext); + assertTrue(result.isSuccess()); + } + + public void testMaxMapCountCheckMaxMapCountNotAvailable() { + // nothing should happen if current vm.max_map_count is not available + maxMapCount.set(-1); + final BootstrapCheck.BootstrapCheckResult result = check.check(BootstrapChecksTests.defaultContext); + assertTrue(result.isSuccess()); + } + public void testGetMaxMapCountOnLinux() { if (Constants.LINUX) { final BootstrapChecks.MaxMapCountCheck check = new BootstrapChecks.MaxMapCountCheck(); @@ -142,7 +207,7 @@ private ParameterizedMessageLoggingExpectation( } @Override - public void match(LogEvent event) { + public void match(final LogEvent event) { if (event.getLevel().equals(level) && event.getLoggerName().equals(loggerName) && event.getMessage() instanceof ParameterizedMessage) { diff --git a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java index 513f07b733cda..e9ff4048bfe15 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java +++ b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java @@ -38,8 +38,8 @@ import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportMessageListener; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; @@ -62,7 +62,7 @@ abstract class FailAndRetryMockTransport imp private volatile Map requestHandlers = Collections.emptyMap(); private final 
Object requestHandlerMutex = new Object(); private final ResponseHandlers responseHandlers = new ResponseHandlers(); - private TransportConnectionListener listener; + private TransportMessageListener listener; private boolean connectMode = true; @@ -223,13 +223,15 @@ public RequestHandlerRegistry getRequestHandler(String action) { return requestHandlers.get(action); } + @Override - public void addConnectionListener(TransportConnectionListener listener) { + public void addMessageListener(TransportMessageListener listener) { this.listener = listener; } @Override - public boolean removeConnectionListener(TransportConnectionListener listener) { + public boolean removeMessageListener(TransportMessageListener listener) { throw new UnsupportedOperationException(); } + } diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java index afc6b47483eba..9baf2e1c95644 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java @@ -379,10 +379,10 @@ public void testSniffNodesSamplerClosesConnections() throws Exception { transportClientNodesService.addTransportAddresses(remoteService.getLocalDiscoNode().getAddress()); assertEquals(1, transportClientNodesService.connectedNodes().size()); - assertEquals(1, clientService.connectionManager().connectedNodeCount()); + assertEquals(1, clientService.connectionManager().size()); transportClientNodesService.doSample(); - assertEquals(1, clientService.connectionManager().connectedNodeCount()); + assertEquals(1, clientService.connectionManager().size()); establishedConnections.clear(); handler.blockRequest(); diff --git a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index 92c1016d3e615..b8c39c48f8870 100644 --- a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -37,9 +37,9 @@ import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportInterceptor; +import org.elasticsearch.transport.TransportMessageListener; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; @@ -107,7 +107,6 @@ public void testConnectAndDisconnect() { assertConnectedExactlyToNodes(event.state()); } - public void testReconnect() { List nodes = generateNodes(); NodeConnectionsService service = new NodeConnectionsService(Settings.EMPTY, threadPool, transportService); @@ -135,7 +134,7 @@ public void testReconnect() { private void assertConnectedExactlyToNodes(ClusterState state) { assertConnected(state.nodes()); - assertThat(transportService.getConnectionManager().connectedNodeCount(), equalTo(state.nodes().getSize())); + assertThat(transportService.getConnectionManager().size(), equalTo(state.nodes().getSize())); } private void assertConnected(Iterable nodes) { @@ -188,7 +187,7 @@ public HandshakeResponse 
handshake(Transport.Connection connection, long timeout private final class MockTransport implements Transport { private ResponseHandlers responseHandlers = new ResponseHandlers(); private volatile boolean randomConnectionExceptions = false; - private TransportConnectionListener listener = new TransportConnectionListener() { + private TransportMessageListener listener = new TransportMessageListener() { }; @Override @@ -201,12 +200,12 @@ public RequestHandlerRegistry getRequestHandler(String action) { } @Override - public void addConnectionListener(TransportConnectionListener listener) { + public void addMessageListener(TransportMessageListener listener) { this.listener = listener; } @Override - public boolean removeConnectionListener(TransportConnectionListener listener) { + public boolean removeMessageListener(TransportMessageListener listener) { throw new UnsupportedOperationException(); } @@ -231,7 +230,6 @@ public Connection openConnection(DiscoveryNode node, ConnectionProfile connectio if (randomConnectionExceptions && randomBoolean()) { throw new ConnectTransportException(node, "simulated"); } - listener.onNodeConnected(node); } Connection connection = new Connection() { @Override @@ -260,7 +258,6 @@ public boolean isClosed() { return false; } }; - listener.onConnectionOpened(connection); return connection; } diff --git a/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java b/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java index 2cd8a2c27c714..df97854cc35b0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.ack; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -50,6 +51,7 @@ import static org.hamcrest.Matchers.notNullValue; @ClusterScope(minNumDataNodes = 2) +@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/32767") public class AckIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java new file mode 100644 index 0000000000000..341022030b374 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class DiffableStringMapTests extends ESTestCase { + + public void testDiffableStringMapDiff() { + Map<String, String> m = new HashMap<>(); + m.put("foo", "bar"); + m.put("baz", "eggplant"); + m.put("potato", "canon"); + DiffableStringMap dsm = new DiffableStringMap(m); + + Map<String, String> m2 = new HashMap<>(); + m2.put("foo", "not-bar"); + m2.put("newkey", "yay"); + m2.put("baz", "eggplant"); + DiffableStringMap dsm2 = new DiffableStringMap(m2); + + Diff<DiffableStringMap> diff = dsm2.diff(dsm); + assertThat(diff, instanceOf(DiffableStringMap.DiffableStringMapDiff.class)); + DiffableStringMap.DiffableStringMapDiff dsmd = (DiffableStringMap.DiffableStringMapDiff) diff; + + assertThat(dsmd.getDeletes(), containsInAnyOrder("potato")); + assertThat(dsmd.getDiffs().size(), equalTo(0)); + Map<String, String> upserts = new HashMap<>(); + upserts.put("foo", "not-bar"); + upserts.put("newkey", "yay"); + assertThat(dsmd.getUpserts(), equalTo(upserts)); + + DiffableStringMap dsm3 = diff.apply(dsm); + assertThat(dsm3.get("foo"), equalTo("not-bar")); + assertThat(dsm3.get("newkey"), equalTo("yay")); + assertThat(dsm3.get("baz"), equalTo("eggplant")); + assertThat(dsm3.get("potato"), equalTo(null)); + } + + public void testRandomDiffing() { + Map<String, String> m = new HashMap<>(); + m.put("1", "1"); + m.put("2", "2"); + m.put("3", "3"); + DiffableStringMap dsm = new DiffableStringMap(m); + DiffableStringMap expected = new DiffableStringMap(m); + + for (int i = 0; i < randomIntBetween(5, 50); i++) { + if (randomBoolean() && expected.size() > 1) { + expected.remove(randomFrom(expected.keySet())); + } else if (randomBoolean()) { + expected.put(randomFrom(expected.keySet()), randomAlphaOfLength(4)); + } else { + expected.put(randomAlphaOfLength(2), randomAlphaOfLength(4)); + } + dsm = expected.diff(dsm).apply(dsm); + } + assertThat(expected, equalTo(dsm)); + } + + public void testSerialization() throws IOException { + Map<String, String> m = new HashMap<>(); + // Occasionally have an empty map + if (frequently()) { + m.put("foo", "bar"); + m.put("baz", "eggplant"); + m.put("potato", "canon"); + } + DiffableStringMap dsm = new DiffableStringMap(m); + + BytesStreamOutput bso = new BytesStreamOutput(); + dsm.writeTo(bso); + DiffableStringMap deserialized = new DiffableStringMap(bso.bytes().streamInput()); + assertThat(deserialized, equalTo(dsm)); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index 744a29e843c48..1aaec08030750 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -56,11 +56,11 @@ import org.mockito.ArgumentCaptor; import java.io.IOException; -import java.util.Map; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; +import java.util.Map; import java.util.Set; -import java.util.Collections; -import java.util.Arrays; import java.util.function.Supplier; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; @@ 
-71,13 +71,13 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; import static org.mockito.Matchers.anyObject; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.anyMap; -import static org.mockito.Mockito.times; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class IndexCreationTaskTests extends ESTestCase { @@ -127,14 +127,12 @@ public void testApplyDataFromTemplate() throws Exception { addMatchingTemplate(builder -> builder .putAlias(AliasMetaData.builder("alias1")) .putMapping("mapping1", createMapping()) - .putCustom("custom1", createCustom()) .settings(Settings.builder().put("key1", "value1")) ); final ClusterState result = executeTask(); assertThat(result.metaData().index("test").getAliases(), hasKey("alias1")); - assertThat(result.metaData().index("test").getCustoms(), hasKey("custom1")); assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("value1")); assertThat(getMappingsFromResponse(), Matchers.hasKey("mapping1")); } @@ -142,41 +140,31 @@ public void testApplyDataFromTemplate() throws Exception { public void testApplyDataFromRequest() throws Exception { setupRequestAlias(new Alias("alias1")); setupRequestMapping("mapping1", createMapping()); - setupRequestCustom("custom1", createCustom()); reqSettings.put("key1", "value1"); final ClusterState result = executeTask(); assertThat(result.metaData().index("test").getAliases(), hasKey("alias1")); - assertThat(result.metaData().index("test").getCustoms(), hasKey("custom1")); assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("value1")); assertThat(getMappingsFromResponse(), Matchers.hasKey("mapping1")); } public void testRequestDataHavePriorityOverTemplateData() throws Exception { - final IndexMetaData.Custom tplCustom = createCustom(); - final IndexMetaData.Custom reqCustom = createCustom(); - final IndexMetaData.Custom mergedCustom = createCustom(); - when(reqCustom.mergeWith(tplCustom)).thenReturn(mergedCustom); - final CompressedXContent tplMapping = createMapping("text"); final CompressedXContent reqMapping = createMapping("keyword"); addMatchingTemplate(builder -> builder .putAlias(AliasMetaData.builder("alias1").searchRouting("fromTpl").build()) .putMapping("mapping1", tplMapping) - .putCustom("custom1", tplCustom) .settings(Settings.builder().put("key1", "tplValue")) ); setupRequestAlias(new Alias("alias1").searchRouting("fromReq")); setupRequestMapping("mapping1", reqMapping); - setupRequestCustom("custom1", reqCustom); reqSettings.put("key1", "reqValue"); final ClusterState result = executeTask(); - assertThat(result.metaData().index("test").getCustoms().get("custom1"), equalTo(mergedCustom)); assertThat(result.metaData().index("test").getAliases().get("alias1").getSearchRouting(), equalTo("fromReq")); assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("reqValue")); assertThat(getMappingsFromResponse().get("mapping1").toString(), equalTo("{type={properties={field={type=keyword}}}}")); @@ -272,14 +260,13 @@ public void testShrinkIndexIgnoresTemplates() throws Exception { addMatchingTemplate(builder -> builder 
.putAlias(AliasMetaData.builder("alias1").searchRouting("fromTpl").build()) .putMapping("mapping1", createMapping()) - .putCustom("custom1", createCustom()) .settings(Settings.builder().put("key1", "tplValue")) ); final ClusterState result = executeTask(); assertThat(result.metaData().index("test").getAliases(), not(hasKey("alias1"))); - assertThat(result.metaData().index("test").getCustoms(), not(hasKey("custom1"))); + assertThat(result.metaData().index("test").getCustomData(), not(hasKey("custom1"))); assertThat(result.metaData().index("test").getSettings().keySet(), not(Matchers.contains("key1"))); assertThat(getMappingsFromResponse(), not(Matchers.hasKey("mapping1"))); } @@ -296,7 +283,6 @@ public void testWriteIndex() throws Exception { Boolean writeIndex = randomBoolean() ? null : randomBoolean(); setupRequestAlias(new Alias("alias1").writeIndex(writeIndex)); setupRequestMapping("mapping1", createMapping()); - setupRequestCustom("custom1", createCustom()); reqSettings.put("key1", "value1"); final ClusterState result = executeTask(); @@ -310,7 +296,6 @@ public void testWriteIndexValidationException() throws Exception { .numberOfShards(1).numberOfReplicas(0).build(); idxBuilder.put("test2", existingWriteIndex); setupRequestMapping("mapping1", createMapping()); - setupRequestCustom("custom1", createCustom()); reqSettings.put("key1", "value1"); setupRequestAlias(new Alias("alias1").writeIndex(true)); @@ -342,8 +327,8 @@ private IndexMetaData.Builder createIndexMetaDataBuilder(String name, String uui .numberOfReplicas(numReplicas); } - private IndexMetaData.Custom createCustom() { - return mock(IndexMetaData.Custom.class); + private Map createCustom() { + return Collections.singletonMap("a", "b"); } private interface MetaDataBuilderConfigurator { @@ -372,10 +357,6 @@ private void setupRequestMapping(String mappingKey, CompressedXContent mapping) when(request.mappings()).thenReturn(Collections.singletonMap(mappingKey, mapping.string())); } - private void setupRequestCustom(String customKey, IndexMetaData.Custom custom) throws IOException { - when(request.customs()).thenReturn(Collections.singletonMap(customKey, custom)); - } - private CompressedXContent createMapping() throws IOException { return createMapping("text"); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java index 9e8a5e04f43c1..393f7f6b1d4aa 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java @@ -23,7 +23,9 @@ import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -45,6 +47,8 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.Set; import static org.hamcrest.Matchers.is; @@ -71,6 +75,9 @@ protected NamedXContentRegistry xContentRegistry() { public void 
testIndexMetaDataSerialization() throws IOException { Integer numShard = randomFrom(1, 2, 4, 8, 16); int numberOfReplicas = randomIntBetween(0, 10); + Map customMap = new HashMap<>(); + customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); + customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15)); IndexMetaData metaData = IndexMetaData.builder("foo") .settings(Settings.builder() .put("index.version.created", 1) @@ -80,6 +87,7 @@ public void testIndexMetaDataSerialization() throws IOException { .creationDate(randomLong()) .primaryTerm(0, 2) .setRoutingNumShards(32) + .putCustom("my_custom", customMap) .putRolloverInfo( new RolloverInfo(randomAlphaOfLength(5), Arrays.asList(new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())), @@ -93,7 +101,8 @@ public void testIndexMetaDataSerialization() throws IOException { builder.endObject(); XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); final IndexMetaData fromXContentMeta = IndexMetaData.fromXContent(parser); - assertEquals(metaData, fromXContentMeta); + assertEquals("expected: " + Strings.toString(metaData) + "\nactual : " + Strings.toString(fromXContentMeta), + metaData, fromXContentMeta); assertEquals(metaData.hashCode(), fromXContentMeta.hashCode()); assertEquals(metaData.getNumberOfReplicas(), fromXContentMeta.getNumberOfReplicas()); @@ -103,6 +112,11 @@ public void testIndexMetaDataSerialization() throws IOException { assertEquals(metaData.getCreationDate(), fromXContentMeta.getCreationDate()); assertEquals(metaData.getRoutingFactor(), fromXContentMeta.getRoutingFactor()); assertEquals(metaData.primaryTerm(0), fromXContentMeta.primaryTerm(0)); + ImmutableOpenMap.Builder expectedCustomBuilder = ImmutableOpenMap.builder(); + expectedCustomBuilder.put("my_custom", new DiffableStringMap(customMap)); + ImmutableOpenMap expectedCustom = expectedCustomBuilder.build(); + assertEquals(metaData.getCustomData(), expectedCustom); + assertEquals(metaData.getCustomData(), fromXContentMeta.getCustomData()); final BytesStreamOutput out = new BytesStreamOutput(); metaData.writeTo(out); @@ -119,6 +133,8 @@ public void testIndexMetaDataSerialization() throws IOException { assertEquals(metaData.getRoutingFactor(), deserialized.getRoutingFactor()); assertEquals(metaData.primaryTerm(0), deserialized.primaryTerm(0)); assertEquals(metaData.getRolloverInfos(), deserialized.getRolloverInfos()); + assertEquals(deserialized.getCustomData(), expectedCustom); + assertEquals(metaData.getCustomData(), deserialized.getCustomData()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java index 6d489f5feb314..5fc0764235417 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java @@ -18,12 +18,9 @@ */ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.DeprecationHandler; import 
org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -35,62 +32,15 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; import java.util.Arrays; -import java.util.Base64; import java.util.Collections; import static java.util.Collections.singletonMap; -import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.contains; public class IndexTemplateMetaDataTests extends ESTestCase { - // bwc for #21009 - public void testIndexTemplateMetaData510() throws IOException { - IndexTemplateMetaData metaData = IndexTemplateMetaData.builder("foo") - .patterns(Collections.singletonList("bar")) - .order(1) - .settings(Settings.builder() - .put("setting1", "value1") - .put("setting2", "value2")) - .putAlias(newAliasMetaDataBuilder("alias-bar1")).build(); - - IndexTemplateMetaData multiMetaData = IndexTemplateMetaData.builder("foo") - .patterns(Arrays.asList("bar", "foo")) - .order(1) - .settings(Settings.builder() - .put("setting1", "value1") - .put("setting2", "value2")) - .putAlias(newAliasMetaDataBuilder("alias-bar1")).build(); - - // These bytes were retrieved by Base64 encoding the result of the above with 5_0_0 code - String templateBytes = "A2ZvbwAAAAEDYmFyAghzZXR0aW5nMQEGdmFsdWUxCHNldHRpbmcyAQZ2YWx1ZTIAAQphbGlhcy1iYXIxAAAAAAA="; - BytesArray bytes = new BytesArray(Base64.getDecoder().decode(templateBytes)); - - try (StreamInput in = bytes.streamInput()) { - in.setVersion(Version.V_5_0_0); - IndexTemplateMetaData readMetaData = IndexTemplateMetaData.readFrom(in); - assertEquals(0, in.available()); - assertEquals(metaData.getName(), readMetaData.getName()); - assertEquals(metaData.getPatterns(), readMetaData.getPatterns()); - assertTrue(metaData.aliases().containsKey("alias-bar1")); - assertEquals(1, metaData.aliases().size()); - - BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(Version.V_5_0_0); - readMetaData.writeTo(output); - assertEquals(bytes.toBytesRef(), output.bytes().toBytesRef()); - - // test that multi templates are reverse-compatible. - // for the bwc case, if multiple patterns, use only the first pattern seen. 
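A minimal sketch (separate from the patch content) of how the map-based index customs exercised by the IndexMetaDataTests changes above are attached and read back. The builder and accessor calls mirror ones that appear in this patch (putCustom, getCustomData, settings keys); the index name and the custom key/value are made up for illustration.

    Map<String, String> custom = new HashMap<>();
    custom.put("created_by", "example");                       // hypothetical custom entry

    IndexMetaData meta = IndexMetaData.builder("my-index")     // hypothetical index name
        .settings(Settings.builder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
        .putCustom("my_custom", custom)
        .build();

    // getCustomData() exposes the custom maps wrapped as DiffableStringMap values.
    assertEquals("example", meta.getCustomData().get("my_custom").get("created_by"));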
- output.reset(); - multiMetaData.writeTo(output); - assertEquals(bytes.toBytesRef(), output.bytes().toBytesRef()); - } - } - public void testIndexTemplateMetaDataXContentRoundTrip() throws Exception { ToXContent.Params params = new ToXContent.MapParams(singletonMap("reduce_mappings", "true")); @@ -128,13 +78,13 @@ public void testIndexTemplateMetaDataXContentRoundTrip() throws Exception { public void testValidateInvalidIndexPatterns() throws Exception { final IllegalArgumentException emptyPatternError = expectThrows(IllegalArgumentException.class, () -> { new IndexTemplateMetaData(randomRealisticUnicodeOfLengthBetween(5, 10), randomInt(), randomInt(), - Collections.emptyList(), Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of()); + Collections.emptyList(), Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of()); }); assertThat(emptyPatternError.getMessage(), equalTo("Index patterns must not be null or empty; got []")); final IllegalArgumentException nullPatternError = expectThrows(IllegalArgumentException.class, () -> { new IndexTemplateMetaData(randomRealisticUnicodeOfLengthBetween(5, 10), randomInt(), randomInt(), - null, Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of()); + null, Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of()); }); assertThat(nullPatternError.getMessage(), equalTo("Index patterns must not be null or empty; got null")); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java index e329e70134c0c..c1e341fd5bc2f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java @@ -147,7 +147,7 @@ public static IndexMetaData newIndexMeta(String name, Settings indexSettings) { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_CREATION_DATE, 1) .put(IndexMetaData.SETTING_INDEX_UUID, "BOOM") - .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_5_0_0_beta1) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_6_0_0_alpha1) .put(indexSettings) .build(); return IndexMetaData.builder(name).settings(build).build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java index 1e46c2c428663..865059c337903 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java @@ -16,12 +16,15 @@ * specific language governing permissions and limitations * under the License. 
*/ + package org.elasticsearch.cluster.metadata; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -31,6 +34,7 @@ import java.util.Collections; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; public class MetaDataMappingServiceTests extends ESSingleNodeTestCase { @@ -47,8 +51,18 @@ public void testMappingClusterStateUpdateDoesntChangeExistingIndices() throws Ex final ClusterService clusterService = getInstanceFromNode(ClusterService.class); // TODO - it will be nice to get a random mapping generator final PutMappingClusterStateUpdateRequest request = new PutMappingClusterStateUpdateRequest().type("type"); - request.source("{ \"properties\" { \"field\": { \"type\": \"text\" }}}"); - mappingService.putMappingExecutor.execute(clusterService.state(), Collections.singletonList(request)); + request.indices(new Index[] {indexService.index()}); + request.source("{ \"properties\": { \"field\": { \"type\": \"text\" }}}"); + final ClusterStateTaskExecutor.ClusterTasksResult result = + mappingService.putMappingExecutor.execute(clusterService.state(), Collections.singletonList(request)); + // the task completed successfully + assertThat(result.executionResults.size(), equalTo(1)); + assertTrue(result.executionResults.values().iterator().next().isSuccess()); + // the task really was a mapping update + assertThat( + indexService.mapperService().documentMapper("type").mappingSource(), + not(equalTo(result.resultingState.metaData().index("test").mapping("type").source()))); + // since we never committed the cluster state update, the in-memory state is unchanged assertThat(indexService.mapperService().documentMapper("type").mappingSource(), equalTo(currentMapping)); } @@ -69,4 +83,35 @@ public void testClusterStateIsNotChangedWithIdenticalMappings() throws Exception assertSame(result, result2); } + + public void testMappingVersion() throws Exception { + final IndexService indexService = createIndex("test", client().admin().indices().prepareCreate("test").addMapping("type")); + final long previousVersion = indexService.getMetaData().getMappingVersion(); + final MetaDataMappingService mappingService = getInstanceFromNode(MetaDataMappingService.class); + final ClusterService clusterService = getInstanceFromNode(ClusterService.class); + final PutMappingClusterStateUpdateRequest request = new PutMappingClusterStateUpdateRequest().type("type"); + request.indices(new Index[] {indexService.index()}); + request.source("{ \"properties\": { \"field\": { \"type\": \"text\" }}}"); + final ClusterStateTaskExecutor.ClusterTasksResult result = + mappingService.putMappingExecutor.execute(clusterService.state(), Collections.singletonList(request)); + assertThat(result.executionResults.size(), equalTo(1)); + assertTrue(result.executionResults.values().iterator().next().isSuccess()); + assertThat(result.resultingState.metaData().index("test").getMappingVersion(), equalTo(1 + previousVersion)); + } + + public void testMappingVersionUnchanged() throws Exception { + final IndexService indexService = createIndex("test", 
client().admin().indices().prepareCreate("test").addMapping("type")); + final long previousVersion = indexService.getMetaData().getMappingVersion(); + final MetaDataMappingService mappingService = getInstanceFromNode(MetaDataMappingService.class); + final ClusterService clusterService = getInstanceFromNode(ClusterService.class); + final PutMappingClusterStateUpdateRequest request = new PutMappingClusterStateUpdateRequest().type("type"); + request.indices(new Index[] {indexService.index()}); + request.source("{ \"properties\": {}}"); + final ClusterStateTaskExecutor.ClusterTasksResult result = + mappingService.putMappingExecutor.execute(clusterService.state(), Collections.singletonList(request)); + assertThat(result.executionResults.size(), equalTo(1)); + assertTrue(result.executionResults.values().iterator().next().isSuccess()); + assertThat(result.resultingState.metaData().index("test").getMappingVersion(), equalTo(previousVersion)); + } + } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 8038d9b5e18de..d4645208071a3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -228,7 +228,7 @@ protected DiscoveryNode createNode(DiscoveryNode.Role... mustHaveRoles) { } final String id = String.format(Locale.ROOT, "node_%03d", nodeIdGenerator.incrementAndGet()); return new DiscoveryNode(id, id, buildNewFakeTransportAddress(), Collections.emptyMap(), roles, - VersionUtils.randomVersionBetween(random(), Version.V_5_6_0, null)); + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1, null)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 1fa1ff3a154af..787789d410ff9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -576,7 +576,7 @@ public void testReplicaOnNewestVersionIsPromoted() { // add a single node clusterState = ClusterState.builder(clusterState).nodes( DiscoveryNodes.builder() - .add(newNode("node1-5.x", Version.V_5_6_0))) + .add(newNode("node1-5.x", Version.fromId(5060099)))) .build(); clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); @@ -590,7 +590,7 @@ public void testReplicaOnNewestVersionIsPromoted() { // add another 5.6 node clusterState = ClusterState.builder(clusterState).nodes( DiscoveryNodes.builder(clusterState.nodes()) - .add(newNode("node2-5.x", Version.V_5_6_0))) + .add(newNode("node2-5.x", Version.fromId(5060099)))) .build(); // start the shards, should have 1 primary and 1 replica available diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java index 2022ecb945ba0..536e3cbb7e08d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing.allocation; import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.shrink.ResizeAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -39,7 +38,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.gateway.TestGatewayAllocator; import java.util.Arrays; @@ -243,46 +241,4 @@ public void testSourcePrimaryActive() { routingAllocation).getExplanation()); } } - - public void testAllocateOnOldNode() { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, - VersionUtils.getPreviousVersion(ResizeAction.COMPATIBILITY_VERSION)); - ClusterState clusterState = createInitialClusterState(true, version); - MetaData.Builder metaBuilder = MetaData.builder(clusterState.metaData()); - metaBuilder.put(IndexMetaData.builder("target").settings(settings(Version.CURRENT) - .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME.getKey(), "source") - .put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, IndexMetaData.INDEX_UUID_NA_VALUE)) - .numberOfShards(4).numberOfReplicas(0)); - MetaData metaData = metaBuilder.build(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable()); - routingTableBuilder.addAsNew(metaData.index("target")); - - clusterState = ClusterState.builder(clusterState) - .routingTable(routingTableBuilder.build()) - .metaData(metaData).build(); - Index idx = clusterState.metaData().index("target").getIndex(); - - - ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); - RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); - int shardId = randomIntBetween(0, 3); - int sourceShardId = IndexMetaData.selectSplitShard(shardId, clusterState.metaData().index("source"), 4).id(); - ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, RecoverySource - .LocalShardsRecoverySource.INSTANCE, ShardRoutingState.UNASSIGNED); - assertEquals(Decision.YES, resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); - - assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), - routingAllocation)); - assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node2"), - routingAllocation)); - - routingAllocation.debugDecision(true); - assertEquals("source primary is active", resizeAllocationDecider.canAllocate(shardRouting, routingAllocation).getExplanation()); - assertEquals("node [node1] is too old to split a shard", - resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), - routingAllocation).getExplanation()); - assertEquals("node [node2] is too old to split a shard", - resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node2"), - routingAllocation).getExplanation()); - } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java 
b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 10fc358e4d4ea..da0e0a9b0bcfa 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -286,16 +286,19 @@ public void testSizeShrinkIndex() { metaBuilder.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put("index.uuid", "1234")) .numberOfShards(4).numberOfReplicas(0)); metaBuilder.put(IndexMetaData.builder("target").settings(settings(Version.CURRENT).put("index.uuid", "5678") - .put("index.shrink.source.name", "test").put("index.shrink.source.uuid", "1234")).numberOfShards(1).numberOfReplicas(0)); + .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY, "test").put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, "1234")) + .numberOfShards(1) + .numberOfReplicas(0)); metaBuilder.put(IndexMetaData.builder("target2").settings(settings(Version.CURRENT).put("index.uuid", "9101112") - .put("index.shrink.source.name", "test").put("index.shrink.source.uuid", "1234")).numberOfShards(2).numberOfReplicas(0)); + .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY, "test").put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, "1234")) + .numberOfShards(2).numberOfReplicas(0)); MetaData metaData = metaBuilder.build(); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); routingTableBuilder.addAsNew(metaData.index("test")); routingTableBuilder.addAsNew(metaData.index("target")); routingTableBuilder.addAsNew(metaData.index("target2")); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .metaData(metaData).routingTable(routingTableBuilder.build()).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build(); AllocationService allocationService = createAllocationService(); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))) @@ -330,7 +333,6 @@ public void testSizeShrinkIndex() { assertEquals(100L, DiskThresholdDecider.getExpectedShardSize(test_1, allocation, 0)); assertEquals(10L, DiskThresholdDecider.getExpectedShardSize(test_0, allocation, 0)); - ShardRouting target = ShardRouting.newUnassigned(new ShardId(new Index("target", "5678"), 0), true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); assertEquals(1110L, DiskThresholdDecider.getExpectedShardSize(target, allocation, 0)); @@ -350,12 +352,9 @@ public void testSizeShrinkIndex() { .build(); allocationService.reroute(clusterState, "foo"); - RoutingAllocation allocationWithMissingSourceIndex = new RoutingAllocation(null, clusterStateWithMissingSourceIndex.getRoutingNodes(), clusterStateWithMissingSourceIndex, info, 0); - assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target, allocationWithMissingSourceIndex, 42L)); assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target2, allocationWithMissingSourceIndex, 42L)); } - } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index b1fa8346e2c54..ba6fe5b9a5a3b 
100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -42,8 +42,8 @@ import java.util.Arrays; import java.util.Collections; -import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_SHRINK_SOURCE_NAME; -import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_SHRINK_SOURCE_UUID; +import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_RESIZE_SOURCE_NAME; +import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_RESIZE_SOURCE_UUID; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; @@ -151,8 +151,8 @@ private ClusterState createInitialClusterState(AllocationService service, Settin .putInSyncAllocationIds(1, Collections.singleton("aid1")) .build(); metaData.put(sourceIndex, false); - indexSettings.put(INDEX_SHRINK_SOURCE_UUID.getKey(), sourceIndex.getIndexUUID()); - indexSettings.put(INDEX_SHRINK_SOURCE_NAME.getKey(), sourceIndex.getIndex().getName()); + indexSettings.put(INDEX_RESIZE_SOURCE_UUID.getKey(), sourceIndex.getIndexUUID()); + indexSettings.put(INDEX_RESIZE_SOURCE_NAME.getKey(), sourceIndex.getIndex().getName()); } else { sourceIndex = null; } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java index 87f98389231e4..b4a24cfc4fcd0 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java @@ -98,6 +98,11 @@ public void testLongGeohashes() { } } + public void testNorthPoleBoundingBox() { + Rectangle bbox = GeoHashUtils.bbox("zzbxfpgzupbx"); // Bounding box with maximum precision touching north pole + assertEquals(90.0, bbox.maxLat, 0.0000001); // Should be 90 degrees + } + public void testInvalidGeohashes() { IllegalArgumentException ex; diff --git a/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index 5d1e10956d5a2..890f6ef163b33 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -416,8 +416,8 @@ public void testMMapHackSupported() throws Exception { public void testWrapAllDocsLive() throws Exception { Directory dir = newDirectory(); - IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) - .setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETE_FIELD, MatchAllDocsQuery::new, newMergePolicy())); + IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) + .setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, MatchAllDocsQuery::new, newMergePolicy())); IndexWriter writer = new IndexWriter(dir, config); int numDocs = between(1, 10); Set liveDocs = new HashSet<>(); @@ -434,9 +434,9 @@ public void testWrapAllDocsLive() throws Exception { Document doc = new Document(); doc.add(new StringField("id", "v2-" + id, Store.YES)); if (randomBoolean()) { - doc.add(Lucene.newSoftDeleteField()); + doc.add(Lucene.newSoftDeletesField()); } - writer.softUpdateDocument(new Term("id", 
id), doc, Lucene.newSoftDeleteField()); + writer.softUpdateDocument(new Term("id", id), doc, Lucene.newSoftDeletesField()); liveDocs.add("v2-" + id); } } @@ -456,8 +456,8 @@ public void testWrapAllDocsLive() throws Exception { public void testWrapLiveDocsNotExposeAbortedDocuments() throws Exception { Directory dir = newDirectory(); - IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) - .setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETE_FIELD, MatchAllDocsQuery::new, newMergePolicy())); + IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) + .setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, MatchAllDocsQuery::new, newMergePolicy())); IndexWriter writer = new IndexWriter(dir, config); int numDocs = between(1, 10); List liveDocs = new ArrayList<>(); @@ -466,7 +466,7 @@ public void testWrapLiveDocsNotExposeAbortedDocuments() throws Exception { Document doc = new Document(); doc.add(new StringField("id", id, Store.YES)); if (randomBoolean()) { - doc.add(Lucene.newSoftDeleteField()); + doc.add(Lucene.newSoftDeletesField()); } writer.addDocument(doc); liveDocs.add(id); diff --git a/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java b/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java index 33ea83c759297..f188eb4cac6f4 100644 --- a/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java +++ b/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java @@ -69,7 +69,7 @@ public void testEnumIds() { public void testConversion() { long millis = randomLongBetween(0, Instant.now().toEpochMilli()); DateTimeZone zone = randomDateTimeZone(); - ZoneId zoneId = ZoneId.of(zone.getID()); + ZoneId zoneId = zone.toTimeZone().toZoneId(); int offsetSeconds = zoneId.getRules().getOffset(Instant.ofEpochMilli(millis)).getTotalSeconds(); long parsedMillisJavaTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), zoneId) diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 2376d5663402e..1580c1a379780 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting.Property; @@ -180,6 +181,99 @@ public void testDependentSettings() { service.validate(Settings.builder().put("foo.test.bar", 7).build(), false); } + public void testTupleAffixUpdateConsumer() { + String prefix = randomAlphaOfLength(3) + "foo."; + String intSuffix = randomAlphaOfLength(3); + String listSuffix = randomAlphaOfLength(4); + Setting.AffixSetting intSetting = Setting.affixKeySetting(prefix, intSuffix, + (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope)); + Setting.AffixSetting> listSetting = Setting.affixKeySetting(prefix, listSuffix, + (k) -> Setting.listSetting(k, Arrays.asList("1"), Integer::parseInt, 
Property.Dynamic, Property.NodeScope)); + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY,new HashSet<>(Arrays.asList(intSetting, listSetting))); + Map, Integer>> results = new HashMap<>(); + Function listBuilder = g -> (prefix + g + "." + listSuffix); + Function intBuilder = g -> (prefix + g + "." + intSuffix); + String group1 = randomAlphaOfLength(3); + String group2 = randomAlphaOfLength(4); + String group3 = randomAlphaOfLength(5); + BiConsumer, Integer>> listConsumer = results::put; + + service.addAffixUpdateConsumer(listSetting, intSetting, listConsumer, (s, k) -> { + if (k.v1().isEmpty() && k.v2() == 2) { + throw new IllegalArgumentException("boom"); + } + }); + assertEquals(0, results.size()); + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putList(listBuilder.apply(group2), "18", "19", "20") + .build()); + assertEquals(2, results.get(group1).v2().intValue()); + assertEquals(7, results.get(group2).v2().intValue()); + assertEquals(Arrays.asList(16, 17), results.get(group1).v1()); + assertEquals(Arrays.asList(18, 19, 20), results.get(group2).v1()); + assertEquals(2, results.size()); + + results.clear(); + + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putNull(listBuilder.apply(group2)) // removed + .build()); + + assertNull(group1 + " wasn't changed", results.get(group1)); + assertEquals(1, results.get(group2).v1().size()); + assertEquals(Arrays.asList(1), results.get(group2).v1()); + assertEquals(7, results.get(group2).v2().intValue()); + assertEquals(1, results.size()); + results.clear(); + + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putList(listBuilder.apply(group3), "5", "6") // added + .build()); + assertNull(group1 + " wasn't changed", results.get(group1)); + assertNull(group2 + " wasn't changed", results.get(group2)); + + assertEquals(2, results.get(group3).v1().size()); + assertEquals(Arrays.asList(5, 6), results.get(group3).v1()); + assertEquals(1, results.get(group3).v2().intValue()); + assertEquals(1, results.size()); + results.clear(); + + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 4) // modified + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putList(listBuilder.apply(group3), "5", "6") + .build()); + assertNull(group2 + " wasn't changed", results.get(group2)); + assertNull(group3 + " wasn't changed", results.get(group3)); + + assertEquals(2, results.get(group1).v1().size()); + assertEquals(Arrays.asList(16, 17), results.get(group1).v1()); + assertEquals(4, results.get(group1).v2().intValue()); + assertEquals(1, results.size()); + results.clear(); + + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) // modified to trip validator + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1)) // modified to trip validator + .putList(listBuilder.apply(group3), "5", "6") + .build()) + ); + assertEquals("boom", iae.getMessage()); + assertEquals(0, results.size()); + } + public void testAddConsumerAffix() { Setting.AffixSetting intSetting = Setting.affixKeySetting("foo.", "bar", (k) -> Setting.intSetting(k, 
1, Property.Dynamic, Property.NodeScope)); @@ -893,7 +987,7 @@ public void testInternalIndexSettingsFailsValidation() { public void testInternalIndexSettingsSkipValidation() { final Setting internalIndexSetting = Setting.simpleString("index.internal", Property.InternalIndex, Property.IndexScope); - final IndexScopedSettings indexScopedSettings = + final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(Settings.EMPTY, Collections.singleton(internalIndexSetting)); // nothing should happen, validation should not throw an exception final Settings settings = Settings.builder().put("index.internal", "internal").build(); diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index c32037f44525e..7063e53f7891e 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -757,7 +757,7 @@ public void testTimeValue() { public void testSettingsGroupUpdater() { Setting intSetting = Setting.intSetting("prefix.foo", 1, Property.NodeScope, Property.Dynamic); Setting intSetting2 = Setting.intSetting("prefix.same", 1, Property.NodeScope, Property.Dynamic); - AbstractScopedSettings.SettingUpdater updater = Setting.groupedSettingsUpdater(s -> {}, logger, + AbstractScopedSettings.SettingUpdater updater = Setting.groupedSettingsUpdater(s -> {}, Arrays.asList(intSetting, intSetting2)); Settings current = Settings.builder().put("prefix.foo", 123).put("prefix.same", 5555).build(); @@ -768,7 +768,7 @@ public void testSettingsGroupUpdater() { public void testSettingsGroupUpdaterRemoval() { Setting intSetting = Setting.intSetting("prefix.foo", 1, Property.NodeScope, Property.Dynamic); Setting intSetting2 = Setting.intSetting("prefix.same", 1, Property.NodeScope, Property.Dynamic); - AbstractScopedSettings.SettingUpdater updater = Setting.groupedSettingsUpdater(s -> {}, logger, + AbstractScopedSettings.SettingUpdater updater = Setting.groupedSettingsUpdater(s -> {}, Arrays.asList(intSetting, intSetting2)); Settings current = Settings.builder().put("prefix.same", 5555).build(); @@ -783,7 +783,7 @@ public void testSettingsGroupUpdaterWithAffixSetting() { Setting.AffixSetting affixSetting = Setting.affixKeySetting("prefix.foo.", "suffix", key -> Setting.simpleString(key,Property.NodeScope, Property.Dynamic)); - AbstractScopedSettings.SettingUpdater updater = Setting.groupedSettingsUpdater(s -> {}, logger, + AbstractScopedSettings.SettingUpdater updater = Setting.groupedSettingsUpdater(s -> {}, Arrays.asList(intSetting, prefixKeySetting, affixSetting)); Settings.Builder currentSettingsBuilder = Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java b/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java index e193ea34498cf..feaa7c4a0ae58 100644 --- a/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java +++ b/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.unit; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.hamcrest.MatcherAssert; @@ -319,9 +318,4 @@ public void testGetBytesAsInt() { } } } - - public void testOldSerialisation() throws IOException { - 
ByteSizeValue original = createTestInstance(); - assertSerialization(original, randomFrom(Version.V_5_6_4, Version.V_5_6_5, Version.V_6_0_0, Version.V_6_0_1, Version.V_6_1_0)); - } } diff --git a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java index 0074da43fcfb8..520f80fecac44 100644 --- a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java +++ b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java @@ -59,7 +59,7 @@ public void testParseFromXContent() throws IOException { Float floatRep = randomFloat(); Number value = intValue; if (randomBoolean()) { - value = new Float(floatRep += intValue); + value = Float.valueOf(floatRep += intValue); } XContentBuilder json = jsonBuilder().startObject() .field(Fuzziness.X_FIELD_NAME, randomBoolean() ? value.toString() : value) diff --git a/server/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java b/server/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java index 76dd8e343a266..dd2627f4bc206 100644 --- a/server/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java @@ -63,7 +63,7 @@ public void testUpgradeCustomDataPath() throws IOException { Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0) .put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) @@ -91,7 +91,7 @@ public void testPartialUpgradeCustomDataPath() throws IOException { Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0) .put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) @@ -129,7 +129,7 @@ public void testUpgrade() throws IOException { Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build(); @@ -153,7 +153,7 @@ public void testUpgradeIndices() throws IOException { Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build(); diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index cc6152d98962f..a0fdcbf51ca1d 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ 
b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -337,7 +337,7 @@ public void testInheritContext() throws InterruptedException { final CountDownLatch executed = new CountDownLatch(1); threadContext.putHeader("foo", "bar"); - final Integer one = new Integer(1); + final Integer one = Integer.valueOf(1); threadContext.putTransient("foo", one); EsThreadPoolExecutor executor = EsExecutors.newFixed(getName(), pool, queue, EsExecutors.daemonThreadFactory("dummy"), threadContext); diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java index e71efa46424b2..a0a92cad7a851 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java @@ -42,7 +42,7 @@ public void testStashContext() { threadContext.putHeader("foo", "bar"); threadContext.putTransient("ctx.foo", 1); assertEquals("bar", threadContext.getHeader("foo")); - assertEquals(new Integer(1), threadContext.getTransient("ctx.foo")); + assertEquals(Integer.valueOf(1), threadContext.getTransient("ctx.foo")); assertEquals("1", threadContext.getHeader("default")); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { assertNull(threadContext.getHeader("foo")); @@ -61,7 +61,7 @@ public void testStashAndMerge() { threadContext.putHeader("foo", "bar"); threadContext.putTransient("ctx.foo", 1); assertEquals("bar", threadContext.getHeader("foo")); - assertEquals(new Integer(1), threadContext.getTransient("ctx.foo")); + assertEquals(Integer.valueOf(1), threadContext.getTransient("ctx.foo")); assertEquals("1", threadContext.getHeader("default")); HashMap toMerge = new HashMap<>(); toMerge.put("foo", "baz"); diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index 170ea6cf9313d..3fb5f5996be79 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -22,7 +22,6 @@ import com.fasterxml.jackson.core.JsonGenerationException; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParseException; - import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Constants; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -51,6 +50,19 @@ import java.io.IOException; import java.math.BigInteger; import java.nio.file.Path; +import java.time.DayOfWeek; +import java.time.Duration; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.Month; +import java.time.MonthDay; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.Period; +import java.time.Year; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; @@ -459,6 +471,116 @@ public void testCalendar() throws Exception { .endObject()); } + public void testJavaTime() throws Exception { + final ZonedDateTime d1 = ZonedDateTime.of(2016, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + + // ZonedDateTime + assertResult("{'date':null}", () -> builder().startObject().timeField("date", (ZonedDateTime) null).endObject()); + assertResult("{'date':null}", () -> 
builder().startObject().field("date").timeValue((ZonedDateTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date", (ZonedDateTime) null).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().timeField("d1", d1).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1").timeValue(d1).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1", d1).endObject()); + + // Instant + assertResult("{'date':null}", () -> builder().startObject().timeField("date", (java.time.Instant) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((java.time.Instant) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date", (java.time.Instant) null).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().timeField("d1", d1.toInstant()).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1").timeValue(d1.toInstant()).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1", d1.toInstant()).endObject()); + + // LocalDateTime (no time zone) + assertResult("{'date':null}", () -> builder().startObject().timeField("date", (LocalDateTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((LocalDateTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date", (LocalDateTime) null).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000'}", + () -> builder().startObject().timeField("d1", d1.toLocalDateTime()).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000'}", + () -> builder().startObject().field("d1").timeValue(d1.toLocalDateTime()).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000'}", () -> builder().startObject().field("d1", d1.toLocalDateTime()).endObject()); + + // LocalDate (no time, no time zone) + assertResult("{'date':null}", () -> builder().startObject().timeField("date", (LocalDate) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((LocalDate) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date", (LocalDate) null).endObject()); + assertResult("{'d1':'2016-01-01'}", () -> builder().startObject().timeField("d1", d1.toLocalDate()).endObject()); + assertResult("{'d1':'2016-01-01'}", () -> builder().startObject().field("d1").timeValue(d1.toLocalDate()).endObject()); + assertResult("{'d1':'2016-01-01'}", () -> builder().startObject().field("d1", d1.toLocalDate()).endObject()); + + // LocalTime (no date, no time zone) + assertResult("{'date':null}", () -> builder().startObject().timeField("date", (LocalTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((LocalTime) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date", (LocalTime) null).endObject()); + assertResult("{'d1':'00:00:00.000'}", () -> builder().startObject().timeField("d1", d1.toLocalTime()).endObject()); + assertResult("{'d1':'00:00:00.000'}", () -> builder().startObject().field("d1").timeValue(d1.toLocalTime()).endObject()); + assertResult("{'d1':'00:00:00.000'}", () -> builder().startObject().field("d1", 
d1.toLocalTime()).endObject());
+        final ZonedDateTime d2 = ZonedDateTime.of(2016, 1, 1, 7, 59, 23, 123_000_000, ZoneOffset.UTC);
+        assertResult("{'d1':'07:59:23.123'}", () -> builder().startObject().timeField("d1", d2.toLocalTime()).endObject());
+        assertResult("{'d1':'07:59:23.123'}", () -> builder().startObject().field("d1").timeValue(d2.toLocalTime()).endObject());
+        assertResult("{'d1':'07:59:23.123'}", () -> builder().startObject().field("d1", d2.toLocalTime()).endObject());
+
+        // OffsetDateTime
+        assertResult("{'date':null}", () -> builder().startObject().timeField("date", (OffsetDateTime) null).endObject());
+        assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((OffsetDateTime) null).endObject());
+        assertResult("{'date':null}", () -> builder().startObject().field("date", (OffsetDateTime) null).endObject());
+        assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1", d1.toOffsetDateTime()).endObject());
+        assertResult("{'d1':'2016-01-01T00:00:00.000Z'}",
+            () -> builder().startObject().timeField("d1", d1.toOffsetDateTime()).endObject());
+        assertResult("{'d1':'2016-01-01T00:00:00.000Z'}",
+            () -> builder().startObject().field("d1").timeValue(d1.toOffsetDateTime()).endObject());
+        // also test with a date that has a real offset
+        OffsetDateTime offsetDateTime = d1.withZoneSameLocal(ZoneOffset.ofHours(5)).toOffsetDateTime();
+        assertResult("{'d1':'2016-01-01T00:00:00.000+05:00'}", () -> builder().startObject().field("d1", offsetDateTime).endObject());
+        assertResult("{'d1':'2016-01-01T00:00:00.000+05:00'}", () -> builder().startObject().timeField("d1", offsetDateTime).endObject());
+        assertResult("{'d1':'2016-01-01T00:00:00.000+05:00'}",
+            () -> builder().startObject().field("d1").timeValue(offsetDateTime).endObject());
+
+        // OffsetTime
+        assertResult("{'date':null}", () -> builder().startObject().timeField("date", (OffsetTime) null).endObject());
+        assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((OffsetTime) null).endObject());
+        assertResult("{'date':null}", () -> builder().startObject().field("date", (OffsetTime) null).endObject());
+        final OffsetTime offsetTime = d2.toOffsetDateTime().toOffsetTime();
+        assertResult("{'o':'07:59:23.123Z'}", () -> builder().startObject().timeField("o", offsetTime).endObject());
+        assertResult("{'o':'07:59:23.123Z'}", () -> builder().startObject().field("o").timeValue(offsetTime).endObject());
+        assertResult("{'o':'07:59:23.123Z'}", () -> builder().startObject().field("o", offsetTime).endObject());
+        // also test with a date that has a real offset
+        final OffsetTime zonedOffsetTime = offsetTime.withOffsetSameLocal(ZoneOffset.ofHours(5));
+        assertResult("{'o':'07:59:23.123+05:00'}", () -> builder().startObject().timeField("o", zonedOffsetTime).endObject());
+        assertResult("{'o':'07:59:23.123+05:00'}", () -> builder().startObject().field("o").timeValue(zonedOffsetTime).endObject());
+        assertResult("{'o':'07:59:23.123+05:00'}", () -> builder().startObject().field("o", zonedOffsetTime).endObject());
+
+        // DayOfWeek enum, not a real time value, but might be used in scripts
+        assertResult("{'dayOfWeek':null}", () -> builder().startObject().field("dayOfWeek", (DayOfWeek) null).endObject());
+        DayOfWeek dayOfWeek = randomFrom(DayOfWeek.values());
+        assertResult("{'dayOfWeek':'" + dayOfWeek + "'}", () -> builder().startObject().field("dayOfWeek", dayOfWeek).endObject());
+
+        // Month
+        Month month = randomFrom(Month.values());
+        assertResult("{'m':null}",
+            () -> builder().startObject().field("m", (Month) null).endObject());
+        assertResult("{'m':'" + month + "'}", () -> builder().startObject().field("m", month).endObject());
+
+        // MonthDay
+        MonthDay monthDay = MonthDay.of(month, randomIntBetween(1, 28));
+        assertResult("{'m':null}", () -> builder().startObject().field("m", (MonthDay) null).endObject());
+        assertResult("{'m':'" + monthDay + "'}", () -> builder().startObject().field("m", monthDay).endObject());
+
+        // Year
+        Year year = Year.of(randomIntBetween(0, 2300));
+        assertResult("{'y':null}", () -> builder().startObject().field("y", (Year) null).endObject());
+        assertResult("{'y':'" + year + "'}", () -> builder().startObject().field("y", year).endObject());
+
+        // Duration
+        Duration duration = Duration.ofSeconds(randomInt(100000));
+        assertResult("{'d':null}", () -> builder().startObject().field("d", (Duration) null).endObject());
+        assertResult("{'d':'" + duration + "'}", () -> builder().startObject().field("d", duration).endObject());
+
+        // Period
+        Period period = Period.ofDays(randomInt(1000));
+        assertResult("{'p':null}", () -> builder().startObject().field("p", (Period) null).endObject());
+        assertResult("{'p':'" + period + "'}", () -> builder().startObject().field("p", period).endObject());
+    }
+
     public void testGeoPoint() throws Exception {
         assertResult("{'geo':null}", () -> builder().startObject().field("geo", (GeoPoint) null).endObject());
         assertResult("{'geo':{'lat':52.4267578125,'lon':13.271484375}}", () -> builder()
diff --git a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java
index 0f3288b1973e5..ac2f2b0d4f32e 100644
--- a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java
+++ b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java
@@ -109,6 +109,8 @@ public void setDisruptionScheme(ServiceDisruptionScheme scheme) {
     protected void beforeIndexDeletion() throws Exception {
         if (disableBeforeIndexDeletion == false) {
             super.beforeIndexDeletion();
+            internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex();
+            assertSeqNos();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java
index f2491b2db1f9a..82ec987420bb7 100644
--- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java
+++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java
@@ -18,7 +18,6 @@
  */
 package org.elasticsearch.discovery;
-import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -29,6 +28,7 @@
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.discovery.zen.UnicastHostsProvider;
 import org.elasticsearch.discovery.zen.ZenDiscovery;
 import org.elasticsearch.plugins.DiscoveryPlugin;
@@ -99,7 +99,7 @@ public void clearDummyServices() throws IOException {
     private DiscoveryModule newModule(Settings settings, List plugins) {
         return new DiscoveryModule(settings, threadPool, transportService, namedWriteableRegistry, null, masterService,
-            clusterApplier,
clusterSettings, plugins, null, createTempDir().toAbsolutePath()); } public void testDefaults() { diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java similarity index 63% rename from plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java rename to server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java index 5837d3bcdfe3f..8922a38ea1e78 100644 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.discovery.file; +package org.elasticsearch.discovery.zen; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -26,9 +26,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.env.Environment; -import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -50,16 +48,15 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import static org.elasticsearch.discovery.file.FileBasedUnicastHostsProvider.UNICAST_HOSTS_FILE; +import static org.elasticsearch.discovery.zen.FileBasedUnicastHostsProvider.UNICAST_HOSTS_FILE; -/** - * Tests for {@link FileBasedUnicastHostsProvider}. 
- */ public class FileBasedUnicastHostsProviderTests extends ESTestCase { + private boolean legacyLocation; private ThreadPool threadPool; private ExecutorService executorService; private MockTransportService transportService; + private Path configPath; @Before public void setUp() throws Exception { @@ -83,23 +80,20 @@ public void tearDown() throws Exception { @Before public void createTransportSvc() { - MockTcpTransport transport = - new MockTcpTransport(Settings.EMPTY, - threadPool, - BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), - new NamedWriteableRegistry(Collections.emptyList()), - new NetworkService(Collections.emptyList())) { - @Override - public BoundTransportAddress boundAddress() { - return new BoundTransportAddress( - new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)}, - new TransportAddress(InetAddress.getLoopbackAddress(), 9300) - ); - } - }; + final MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, + new NoneCircuitBreakerService(), + new NamedWriteableRegistry(Collections.emptyList()), + new NetworkService(Collections.emptyList())) { + @Override + public BoundTransportAddress boundAddress() { + return new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)}, + new TransportAddress(InetAddress.getLoopbackAddress(), 9300) + ); + } + }; transportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - null); + null); } public void testBuildDynamicNodes() throws Exception { @@ -114,18 +108,27 @@ public void testBuildDynamicNodes() throws Exception { assertEquals(9300, nodes.get(2).getPort()); } + public void testBuildDynamicNodesLegacyLocation() throws Exception { + legacyLocation = true; + testBuildDynamicNodes(); + assertDeprecatedLocationWarning(); + } + public void testEmptyUnicastHostsFile() throws Exception { final List hostEntries = Collections.emptyList(); final List addresses = setupAndRunHostProvider(hostEntries); assertEquals(0, addresses.size()); } - public void testUnicastHostsDoesNotExist() throws Exception { - final Settings settings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - final Environment environment = TestEnvironment.newEnvironment(settings); - final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(environment); + public void testEmptyUnicastHostsFileLegacyLocation() throws Exception { + legacyLocation = true; + testEmptyUnicastHostsFile(); + assertDeprecatedLocationWarning(); + } + + public void testUnicastHostsDoesNotExist() { + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); + final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(settings, createTempDir().toAbsolutePath()); final List addresses = provider.buildDynamicHosts((hosts, limitPortCounts) -> UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, TimeValue.timeValueSeconds(10))); @@ -133,42 +136,60 @@ public void testUnicastHostsDoesNotExist() throws Exception { } public void testInvalidHostEntries() throws Exception { - List hostEntries = Arrays.asList("192.168.0.1:9300:9300"); - List addresses = setupAndRunHostProvider(hostEntries); + final List hostEntries = Arrays.asList("192.168.0.1:9300:9300"); + final List addresses = 
setupAndRunHostProvider(hostEntries);
         assertEquals(0, addresses.size());
     }
+    public void testInvalidHostEntriesLegacyLocation() throws Exception {
+        legacyLocation = true;
+        testInvalidHostEntries();
+        assertDeprecatedLocationWarning();
+    }
+
     public void testSomeInvalidHostEntries() throws Exception {
-        List hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301");
-        List addresses = setupAndRunHostProvider(hostEntries);
+        final List hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301");
+        final List addresses = setupAndRunHostProvider(hostEntries);
         assertEquals(1, addresses.size()); // only one of the two is valid and will be used
         assertEquals("192.168.0.1", addresses.get(0).getAddress());
         assertEquals(9301, addresses.get(0).getPort());
     }
+    public void testSomeInvalidHostEntriesLegacyLocation() throws Exception {
+        legacyLocation = true;
+        testSomeInvalidHostEntries();
+        assertDeprecatedLocationWarning();
+    }
+
     // sets up the config dir, writes to the unicast hosts file in the config dir,
     // and then runs the file-based unicast host provider to get the list of discovery nodes
     private List setupAndRunHostProvider(final List hostEntries) throws IOException {
         final Path homeDir = createTempDir();
         final Settings settings = Settings.builder()
-            .put(Environment.PATH_HOME_SETTING.getKey(), homeDir)
-            .build();
-        final Path configPath;
+            .put(Environment.PATH_HOME_SETTING.getKey(), homeDir)
+            .build();
         if (randomBoolean()) {
             configPath = homeDir.resolve("config");
         } else {
             configPath = createTempDir();
         }
-        final Path discoveryFilePath = configPath.resolve("discovery-file");
+        final Path discoveryFilePath = legacyLocation ? configPath.resolve("discovery-file") : configPath;
         Files.createDirectories(discoveryFilePath);
         final Path unicastHostsPath = discoveryFilePath.resolve(UNICAST_HOSTS_FILE);
         try (BufferedWriter writer = Files.newBufferedWriter(unicastHostsPath)) {
             writer.write(String.join("\n", hostEntries));
         }
-        return new FileBasedUnicastHostsProvider(
-            new Environment(settings, configPath)).buildDynamicHosts((hosts, limitPortCounts) ->
-                UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService,
-                    TimeValue.timeValueSeconds(10)));
+        return new FileBasedUnicastHostsProvider(settings, configPath).buildDynamicHosts((hosts, limitPortCounts) ->
+            UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService,
+                TimeValue.timeValueSeconds(10)));
+    }
+
+    private void assertDeprecatedLocationWarning() {
+        assertWarnings("Found dynamic hosts list at [" +
+            configPath.resolve("discovery-file").resolve(UNICAST_HOSTS_FILE) +
+            "] but this path is deprecated. This list should be at [" +
+            configPath.resolve(UNICAST_HOSTS_FILE) +
+            "] instead. 
Support for the deprecated path will be removed in future."); } } diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java index 2f4be2fcd5394..8ebb543da6e4f 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java @@ -28,9 +28,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; -import java.util.stream.Collectors; - -import static org.elasticsearch.test.VersionUtils.allVersions; import static org.elasticsearch.test.VersionUtils.getPreviousVersion; import static org.elasticsearch.test.VersionUtils.incompatibleFutureVersion; import static org.elasticsearch.test.VersionUtils.maxCompatibleVersion; @@ -80,7 +77,7 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { final Version maxNodeVersion = nodes.getMaxNodeVersion(); final Version minNodeVersion = nodes.getMinNodeVersion(); - if (maxNodeVersion.onOrAfter(Version.V_6_0_0_alpha1)) { + if (maxNodeVersion.onOrAfter(Version.V_7_0_0_alpha1)) { final Version tooLow = getPreviousVersion(maxNodeVersion.minimumCompatibilityVersion()); expectThrows(IllegalStateException.class, () -> { if (randomBoolean()) { @@ -91,7 +88,7 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { }); } - if (minNodeVersion.before(Version.V_5_5_0)) { + if (minNodeVersion.before(Version.V_6_0_0)) { Version tooHigh = incompatibleFutureVersion(minNodeVersion); expectThrows(IllegalStateException.class, () -> { if (randomBoolean()) { @@ -102,8 +99,8 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { }); } - if (minNodeVersion.onOrAfter(Version.V_6_0_0_alpha1)) { - Version oldMajor = randomFrom(allVersions().stream().filter(v -> v.major < 6).collect(Collectors.toList())); + if (minNodeVersion.onOrAfter(Version.V_7_0_0_alpha1)) { + Version oldMajor = Version.V_6_4_0.minimumCompatibilityVersion(); expectThrows(IllegalStateException.class, () -> MembershipAction.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); } diff --git a/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index d236d01f049dd..0bf80e5239874 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -267,6 +267,7 @@ public void testLoadState() throws IOException { IndexMetaData deserialized = indices.get(original.getIndex().getName()); assertThat(deserialized, notNullValue()); assertThat(deserialized.getVersion(), equalTo(original.getVersion())); + assertThat(deserialized.getMappingVersion(), equalTo(original.getMappingVersion())); assertThat(deserialized.getNumberOfReplicas(), equalTo(original.getNumberOfReplicas())); assertThat(deserialized.getNumberOfShards(), equalTo(original.getNumberOfShards())); } diff --git a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 3b92f04df0893..b0b6c35f92a1a 100644 --- a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -482,9 +482,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { assertThat("all 
existing files should be reused, file count mismatch", recoveryState.getIndex().reusedFileCount(), equalTo(filesReused)); assertThat(recoveryState.getIndex().reusedFileCount(), equalTo(recoveryState.getIndex().totalFileCount() - filesRecovered)); assertThat("> 0 files should be reused", recoveryState.getIndex().reusedFileCount(), greaterThan(0)); - // both cases will be zero once we start sending only ops after local checkpoint of the safe commit - int expectedTranslogOps = softDeleteEnabled ? numDocs + moreDocs : 0; - assertThat("no translog ops should be recovered", recoveryState.getTranslog().recoveredOperations(), equalTo(expectedTranslogOps)); + assertThat("no translog ops should be recovered", recoveryState.getTranslog().recoveredOperations(), equalTo(0)); } } } diff --git a/server/src/test/java/org/elasticsearch/get/GetActionIT.java b/server/src/test/java/org/elasticsearch/get/GetActionIT.java index 5ed6b957c78a4..829d6ff7c1458 100644 --- a/server/src/test/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/test/java/org/elasticsearch/get/GetActionIT.java @@ -528,7 +528,7 @@ public void testGetFieldsMetaDataWithRouting() throws Exception { assertAcked(prepareCreate("test") .addMapping("_doc", "field1", "type=keyword,store=true") .addAlias(new Alias("alias")) - .setSettings(Settings.builder().put("index.refresh_interval", -1).put("index.version.created", Version.V_5_6_0.id))); + .setSettings(Settings.builder().put("index.refresh_interval", -1).put("index.version.created", Version.V_6_0_0.id))); // multi types in 5.6 client().prepareIndex("test", "_doc", "1") diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index a82b932e2b570..000722863887c 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -87,6 +87,8 @@ import java.util.function.Function; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; public class IndexModuleTests extends ESTestCase { @@ -376,6 +378,21 @@ public void testDisableQueryCacheHasPrecedenceOverForceQueryCache() throws IOExc indexService.close("simon says", false); } + public void testMmapfsStoreTypeNotAllowed() { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put("index.store.type", "mmapfs") + .build(); + final Settings nodeSettings = Settings.builder() + .put(IndexModule.NODE_STORE_ALLOW_MMAPFS.getKey(), false) + .build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(new Index("foo", "_na_"), settings, nodeSettings); + final IndexModule module = + new IndexModule(indexSettings, emptyAnalysisRegistry, new InternalEngineFactory(), Collections.emptyMap()); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> newIndexService(module)); + assertThat(e, hasToString(containsString("store type [mmapfs] is not allowed"))); + } + class CustomQueryCache implements QueryCache { @Override diff --git a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index b7da5add2acf6..64a2fa69bcbd5 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -553,4 +553,12 @@ public void testQueryDefaultField() { ); assertThat(index.getDefaultFields(), equalTo(Arrays.asList("body", "title"))); } + + public void testUpdateSoftDeletesFails() { + IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> + settings.updateSettings(Settings.builder().put("index.soft_deletes.enabled", randomBoolean()).build(), + Settings.builder(), Settings.builder(), "index")); + assertThat(error.getMessage(), equalTo("final index setting [index.soft_deletes.enabled], not updateable")); + } } diff --git a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java index 78569d927be76..0dcba53df88e7 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java @@ -146,15 +146,4 @@ public void testInvalidMissing() throws IOException { assertThat(exc.getMessage(), containsString("Illegal missing value:[default]," + " must be one of [_last, _first]")); } - - public void testInvalidVersion() throws IOException { - final Settings settings = Settings.builder() - .put("index.sort.field", "field1") - .build(); - IllegalArgumentException exc = - expectThrows(IllegalArgumentException.class, () -> indexSettings(settings, Version.V_5_4_0)); - assertThat(exc.getMessage(), - containsString("unsupported index.version.created:5.4.0, " + - "can't set index.sort on versions prior to 6.0.0-alpha1")); - } } diff --git a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index 26a5b87866c21..04dc98deb7bf5 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -103,7 +103,7 @@ public void testOverrideDefaultAnalyzer() throws IOException { } public void testOverrideDefaultIndexAnalyzerIsUnsupported() { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_alpha1, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); AnalyzerProvider defaultIndex = new PreBuiltAnalyzerProvider("default_index", AnalyzerScope.INDEX, new EnglishAnalyzer()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index 9aba48f7de55b..33ec090c61e01 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -56,21 +56,21 @@ public void testThatDefaultAndStandardAnalyzerAreTheSameInstance() { public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() { assertThat(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT), - is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_5_0_0))); + is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_6_0_0))); } public void 
testThatInstancesAreCachedAndReused() { assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT), PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT)); // same es version should be cached - assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_2_1), - PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_2_1)); - assertNotSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_0_0), - PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_0_1)); + assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_2_1), + PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_2_1)); + assertNotSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_0_0), + PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_0_1)); // Same Lucene version should be cached: - assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_5_2_1), - PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_5_2_2)); + assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_1), + PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_2)); } public void testThatAnalyzersAreUsedInMapping() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 67049bc3c1c25..d3aead9e44e16 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -661,7 +661,7 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { trimUnsafeCommits(engine.config()); engine = new InternalEngine(engine.config()); assertTrue(engine.isRecovering()); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); Engine.Searcher searcher = wrapper.wrap(engine.acquireSearcher("test")); assertThat(counter.get(), equalTo(2)); searcher.close(); @@ -678,7 +678,7 @@ public void testFlushIsDisabledDuringTranslogRecovery() throws IOException { engine = new InternalEngine(engine.config()); expectThrows(IllegalStateException.class, () -> engine.flush(true, true)); assertTrue(engine.isRecovering()); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertFalse(engine.isRecovering()); doc = testParsedDocument("2", null, testDocumentWithTextField(), SOURCE, null); engine.index(indexForDoc(doc)); @@ -708,7 +708,7 @@ public void testTranslogMultipleOperationsSameDocument() throws IOException { } trimUnsafeCommits(engine.config()); try (Engine recoveringEngine = new InternalEngine(engine.config())){ - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { final TotalHitCountCollector collector = new TotalHitCountCollector(); searcher.searcher().search(new MatchAllDocsQuery(), collector); @@ -744,7 +744,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s } }; assertThat(getTranslog(recoveringEngine).stats().getUncommittedOperations(), equalTo(docs)); - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); assertTrue(committed.get()); } finally { IOUtils.close(recoveringEngine); @@ -778,7 +778,7 @@ public void testTranslogRecoveryWithMultipleGenerations() throws IOException { initialEngine.close(); trimUnsafeCommits(initialEngine.config()); recoveringEngine = new InternalEngine(initialEngine.config()); - recoveringEngine.recoverFromTranslog(); + 
recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), docs); assertEquals(docs, topDocs.totalHits); @@ -788,6 +788,43 @@ public void testTranslogRecoveryWithMultipleGenerations() throws IOException { } } + public void testRecoveryFromTranslogUpToSeqNo() throws IOException { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + final long maxSeqNo; + try (InternalEngine engine = createEngine(config)) { + final int docs = randomIntBetween(1, 100); + for (int i = 0; i < docs; i++) { + final String id = Integer.toString(i); + final ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null); + engine.index(indexForDoc(doc)); + if (rarely()) { + engine.rollTranslogGeneration(); + } else if (rarely()) { + engine.flush(randomBoolean(), true); + } + } + maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo(); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + engine.syncTranslog(); + } + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + engine.recoverFromTranslog(Long.MAX_VALUE); + assertThat(engine.getLocalCheckpoint(), equalTo(maxSeqNo)); + assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(maxSeqNo)); + } + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + long upToSeqNo = randomLongBetween(globalCheckpoint.get(), maxSeqNo); + engine.recoverFromTranslog(upToSeqNo); + assertThat(engine.getLocalCheckpoint(), equalTo(upToSeqNo)); + assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(upToSeqNo)); + } + } + } + public void testConcurrentGetAndFlush() throws Exception { ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); engine.index(indexForDoc(doc)); @@ -1165,7 +1202,7 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException { } trimUnsafeCommits(config); engine = new InternalEngine(config); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); } @@ -1184,7 +1221,7 @@ public void testSyncedFlushVanishesOnReplay() throws IOException { engine.close(); trimUnsafeCommits(config); engine = new InternalEngine(config); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID)); } @@ -2150,7 +2187,7 @@ public void testSeqNoAndCheckpoints() throws IOException { trimUnsafeCommits(initialEngine.engineConfig); try (InternalEngine recoveringEngine = new InternalEngine(initialEngine.config())){ - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); assertEquals(primarySeqNo, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); assertThat( @@ -2471,7 +2508,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { try (InternalEngine engine = createEngine(config)) { engine.index(firstIndexRequest); globalCheckpoint.set(engine.getLocalCheckpoint()); - 
expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); + expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog(Long.MAX_VALUE)); Map userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); @@ -2493,7 +2530,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); } assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); @@ -2510,7 +2547,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { Map userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertEquals(2, engine.getTranslog().currentFileGeneration()); assertEquals(0L, engine.getTranslog().stats().getUncommittedOperations()); } @@ -2524,7 +2561,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { Map userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("no changes - nothing to commit", "1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); @@ -2630,7 +2667,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s } } }) { - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); engine.index(indexForDoc(doc1)); globalCheckpoint.set(engine.getLocalCheckpoint()); @@ -2641,7 +2678,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s try (InternalEngine engine = new InternalEngine(config(indexSettings, store, translogPath, newMergePolicy(), null, null, globalCheckpointSupplier))) { - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertVisibleCount(engine, 1); final long committedGen = Long.valueOf( engine.getLastCommittedSegmentInfos().getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); @@ -2709,7 +2746,7 @@ public void testTranslogReplay() throws IOException { engine.close(); trimUnsafeCommits(copy(engine.config(), inSyncGlobalCheckpointSupplier)); engine = new InternalEngine(copy(engine.config(), inSyncGlobalCheckpointSupplier)); // we need to reuse the engine config unless the parser.mappingModified won't work - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertVisibleCount(engine, numDocs, false); parser = (TranslogHandler) 
engine.config().getTranslogRecoveryRunner(); @@ -3418,7 +3455,7 @@ public void testEngineMaxTimestampIsInitialized() throws IOException { } try (Store store = createStore(newFSDirectory(storeDir)); Engine engine = new InternalEngine(configSupplier.apply(store))) { assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertEquals(timestamp1, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); @@ -3701,7 +3738,7 @@ public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws Bro } trimUnsafeCommits(initialEngine.config()); try (Engine recoveringEngine = new InternalEngine(initialEngine.config())) { - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); recoveringEngine.fillSeqNoGaps(2); assertThat(recoveringEngine.getLocalCheckpoint(), greaterThanOrEqualTo((long) (docs - 1))); } @@ -3804,7 +3841,7 @@ public void testNoOps() throws IOException { maxSeqNo, localCheckpoint); trimUnsafeCommits(engine.config()); - EngineConfig noopEngineConfig = copy(engine.config(), new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETE_FIELD, + EngineConfig noopEngineConfig = copy(engine.config(), new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, () -> new MatchAllDocsQuery(), engine.config().getMergePolicy())); noOpEngine = new InternalEngine(noopEngineConfig, supplier) { @Override @@ -3812,7 +3849,7 @@ protected long doGenerateSeqNoForOperation(Operation operation) { throw new UnsupportedOperationException(); } }; - noOpEngine.recoverFromTranslog(); + noOpEngine.recoverFromTranslog(Long.MAX_VALUE); final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get()); final String reason = "filling gaps"; noOpEngine.noOp(new Engine.NoOp(maxSeqNo + 1, primaryTerm.get(), LOCAL_TRANSLOG_RECOVERY, System.nanoTime(), reason)); @@ -4090,7 +4127,7 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { trimUnsafeCommits(copy(replicaEngine.config(), globalCheckpoint::get)); recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get)); assertEquals(numDocsOnReplica, getTranslog(recoveringEngine).stats().getUncommittedOperations()); - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); assertEquals(checkpointOnReplica, recoveringEngine.getLocalCheckpoint()); assertEquals((maxSeqIDOnReplica + 1) - numDocsOnReplica, recoveringEngine.fillSeqNoGaps(2)); @@ -4126,7 +4163,7 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { if (flushed) { assertThat(recoveringEngine.getTranslogStats().getUncommittedOperations(), equalTo(0)); } - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpoint()); assertEquals(0, recoveringEngine.fillSeqNoGaps(3)); @@ -4319,7 +4356,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s super.commitIndexWriter(writer, translog, syncId); } }) { - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); int 
numDocs = scaledRandomIntBetween(10, 100); for (int docId = 0; docId < numDocs; docId++) { ParseContext.Document document = testDocumentWithTextField(); @@ -4447,13 +4484,18 @@ public void testAcquireIndexCommit() throws Exception { public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { IOUtils.close(engine, store); - store = createStore(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", + Settings.builder().put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), -1) + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1).build()); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { + try (Store store = createStore(); + InternalEngine engine = + createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get))) { final int numDocs = scaledRandomIntBetween(10, 100); for (int docId = 0; docId < numDocs; docId++) { index(engine, docId); - if (frequently()) { + if (rarely()) { engine.flush(randomBoolean(), randomBoolean()); } } @@ -4467,6 +4509,7 @@ public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { globalCheckpoint.set(randomLongBetween(engine.getLocalCheckpoint(), Long.MAX_VALUE)); engine.syncTranslog(); assertThat(DirectoryReader.listCommits(store.directory()), contains(commits.get(commits.size() - 1))); + assertThat(engine.getTranslog().totalOperations(), equalTo(0)); } } @@ -4844,7 +4887,7 @@ public void testLuceneHistoryOnReplica() throws Exception { private void assertOperationHistoryInLucene(List operations) throws IOException { final MergePolicy keepSoftDeleteDocsMP = new SoftDeletesRetentionMergePolicy( - Lucene.SOFT_DELETE_FIELD, () -> new MatchAllDocsQuery(), engine.config().getMergePolicy()); + Lucene.SOFT_DELETES_FIELD, () -> new MatchAllDocsQuery(), engine.config().getMergePolicy()); Settings.Builder settings = Settings.builder() .put(defaultSettings.getSettings()) .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) @@ -4951,6 +4994,32 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { } } + public void testLastRefreshCheckpoint() throws Exception { + AtomicBoolean done = new AtomicBoolean(); + Thread[] refreshThreads = new Thread[between(1, 8)]; + CountDownLatch latch = new CountDownLatch(refreshThreads.length); + for (int i = 0; i < refreshThreads.length; i++) { + latch.countDown(); + refreshThreads[i] = new Thread(() -> { + while (done.get() == false) { + long checkPointBeforeRefresh = engine.getLocalCheckpoint(); + engine.refresh("test", randomFrom(Engine.SearcherScope.values())); + assertThat(engine.lastRefreshedCheckpoint(), greaterThanOrEqualTo(checkPointBeforeRefresh)); + } + }); + refreshThreads[i].start(); + } + latch.await(); + List ops = generateSingleDocHistory(true, VersionType.EXTERNAL, 1, 10, 1000, "1"); + concurrentlyApplyOps(ops, engine); + done.set(true); + for (Thread thread : refreshThreads) { + thread.join(); + } + engine.refresh("test"); + assertThat(engine.lastRefreshedCheckpoint(), equalTo(engine.getLocalCheckpoint())); + } + private static void trimUnsafeCommits(EngineConfig config) throws IOException { final Store store = config.getStore(); final TranslogConfig translogConfig = config.getTranslogConfig(); diff --git 
a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java index 7de086a3be239..2d097366a2724 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java @@ -62,12 +62,12 @@ public void testBasics() throws Exception { long fromSeqNo = randomNonNegativeLong(); long toSeqNo = randomLongBetween(fromSeqNo, Long.MAX_VALUE); // Empty engine - try (Translog.Snapshot snapshot = engine.newLuceneChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, true)) { + try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, true)) { IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot)); assertThat(error.getMessage(), containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found")); } - try (Translog.Snapshot snapshot = engine.newLuceneChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, false)) { + try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, false)) { assertThat(snapshot, SnapshotMatchers.size(0)); } int numOps = between(1, 100); @@ -146,7 +146,7 @@ searcher, mapperService, between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), f // Get snapshot via engine will auto refresh fromSeqNo = randomLongBetween(0, numOps - 1); toSeqNo = randomLongBetween(fromSeqNo, numOps - 1); - try (Translog.Snapshot snapshot = engine.newLuceneChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, randomBoolean())) { + try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, randomBoolean())) { assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo)); } } @@ -182,7 +182,7 @@ public void testDedupByPrimaryTerm() throws Exception { } } long maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo(); - try (Translog.Snapshot snapshot = engine.newLuceneChangesSnapshot("test", mapperService, 0, maxSeqNo, false)) { + try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, 0, maxSeqNo, false)) { Translog.Operation op; while ((op = snapshot.next()) != null) { assertThat(op.toString(), op.primaryTerm(), equalTo(latestOperations.get(op.seqNo()))); @@ -251,7 +251,7 @@ void pullOperations(Engine follower) throws IOException { long fromSeqNo = followerCheckpoint + 1; long batchSize = randomLongBetween(0, 100); long toSeqNo = Math.min(fromSeqNo + batchSize, leaderCheckpoint); - try (Translog.Snapshot snapshot = leader.newLuceneChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, true)) { + try (Translog.Snapshot snapshot = leader.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, true)) { translogHandler.run(follower, snapshot); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index 51b270940998a..d16bdc444e6e7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -414,4 +414,22 @@ public void testMergeText() throws Exception { () -> mapper.merge(update.mapping())); assertEquals("mapper [date] of different type, current_type [date], merged_type 
[text]", e.getMessage()); } + + public void testIllegalFormatField() throws Exception { + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "date") + .array("format", "test_format") + .endObject() + .endObject() + .endObject() + .endObject()); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> parser.parse("type", new CompressedXContent(mapping))); + assertEquals("Invalid format: [[test_format]]: expected string value", e.getMessage()); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index d5419bed239dc..b11e4876f9eaf 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; @@ -745,4 +746,13 @@ public void testDynamicTemplateOrder() throws IOException { client().prepareIndex("test", "type", "1").setSource("foo", "abc").get(); assertThat(index.mapperService().fullName("foo"), instanceOf(KeywordFieldMapper.KeywordFieldType.class)); } + + public void testMappingVersionAfterDynamicMappingUpdate() { + createIndex("test", client().admin().indices().prepareCreate("test").addMapping("type")); + final ClusterService clusterService = getInstanceFromNode(ClusterService.class); + final long previousVersion = clusterService.state().metaData().index("test").getMappingVersion(); + client().prepareIndex("test", "type", "1").setSource("field", "text").get(); + assertThat(clusterService.state().metaData().index("test").getMappingVersion(), equalTo(1 + previousVersion)); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java index f48603d30515f..a910c2c86bab8 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java @@ -40,18 +40,11 @@ public void testParseUnknownParam() throws Exception { templateDef.put("random_param", "random_value"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1)); + () -> DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1)); assertEquals("Illegal dynamic template parameter: [random_param]", e.getMessage()); } public void testParseUnknownMatchType() { - Map templateDef = new HashMap<>(); - templateDef.put("match_mapping_type", "short"); - templateDef.put("mapping", Collections.singletonMap("store", true)); - // if a wrong match type is specified, we ignore the template - assertNull(DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha5)); - assertWarnings("match_mapping_type [short] is invalid and will be ignored: No field type matched on [short], " + - "possible values are [object, string, long, double, boolean, 
date, binary]"); Map templateDef2 = new HashMap<>(); templateDef2.put("match_mapping_type", "text"); templateDef2.put("mapping", Collections.singletonMap("store", true)); @@ -79,7 +72,7 @@ public void testMatchAllTemplate() { Map templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "*"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha5); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); assertTrue(template.match("a.b", "b", randomFrom(XContentFieldType.values()))); } @@ -87,7 +80,7 @@ public void testMatchTypeTemplate() { Map templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "string"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha5); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); assertTrue(template.match("a.b", "b", XContentFieldType.STRING)); assertFalse(template.match("a.b", "b", XContentFieldType.BOOLEAN)); } @@ -97,7 +90,7 @@ public void testSerialization() throws Exception { Map templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "string"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); XContentBuilder builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"match_mapping_type\":\"string\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); @@ -107,7 +100,7 @@ public void testSerialization() throws Exception { templateDef.put("match", "*name"); templateDef.put("unmatch", "first_name"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1); + template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"match\":\"*name\",\"unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); @@ -117,7 +110,7 @@ public void testSerialization() throws Exception { templateDef.put("path_match", "*name"); templateDef.put("path_unmatch", "first_name"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1); + template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"path_match\":\"*name\",\"path_unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", @@ -128,7 +121,7 @@ public void testSerialization() throws Exception { templateDef.put("match", "^a$"); templateDef.put("match_pattern", "regex"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1); + template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); builder = JsonXContent.contentBuilder(); template.toXContent(builder, 
ToXContent.EMPTY_PARAMS); assertEquals("{\"match\":\"^a$\",\"match_pattern\":\"regex\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java index 8f2a51bbfc2bd..5172e7b0b8839 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java @@ -57,7 +57,7 @@ protected Collection> getPlugins() { } public void testExternalValues() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); @@ -107,7 +107,7 @@ public void testExternalValues() throws Exception { } public void testExternalValuesWithMultifield() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); @@ -173,7 +173,7 @@ public void testExternalValuesWithMultifield() throws Exception { } public void testExternalValuesWithMultifieldTwoLevels() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index 56e587dc995da..8e5c81e58f189 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -321,11 +321,16 @@ public void testBoost() throws IOException { public void testEnableNorms() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "keyword").field("norms", true).endObject().endObject() - .endObject().endObject()); + .startObject("properties") + .startObject("field") + .field("type", "keyword") + .field("doc_values", false) + .field("norms", true) + .endObject() + .endObject() + .endObject().endObject()); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); - assertEquals(mapping, mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference @@ -336,8 +341,11 @@ public void testEnableNorms() throws IOException { XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); - assertEquals(2, fields.length); + assertEquals(1, fields.length); assertFalse(fields[0].fieldType().omitNorms()); + + IndexableField[] fieldNamesFields = doc.rootDoc().getFields(FieldNamesFieldMapper.NAME); + assertEquals(0, 
fieldNamesFields.length); } public void testNormalizer() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java index a291062c7a5bf..eae5b4ac7d2ab 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.randomizedtesting.generators.RandomStrings; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.TokenFilter; @@ -28,9 +27,11 @@ import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; -import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.Lucene; @@ -132,6 +133,23 @@ public void testTermsQuery() { assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } + public void testExistsQuery() { + MappedFieldType ft = createDefaultFieldType(); + ft.setName("field"); + + ft.setHasDocValues(true); + ft.setOmitNorms(true); + assertEquals(new DocValuesFieldExistsQuery("field"), ft.existsQuery(null)); + + ft.setHasDocValues(false); + ft.setOmitNorms(false); + assertEquals(new NormsFieldExistsQuery("field"), ft.existsQuery(null)); + + ft.setHasDocValues(false); + ft.setOmitNorms(true); + assertEquals(new TermQuery(new Term(FieldNamesFieldMapper.NAME, "field")), ft.existsQuery(null)); + } + public void testRegexpQuery() { MappedFieldType ft = createDefaultFieldType(); ft.setName("field"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 51b6e9d716887..e31cee29e67bf 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -21,13 +21,16 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.KeywordFieldMapper.KeywordFieldType; import org.elasticsearch.index.mapper.MapperService.MergeReason; @@ -119,6 +122,35 @@ public void testIndexIntoDefaultMapping() throws Throwable { assertNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING)); } + public void 
testIndexMetaDataUpdateDoesNotLoseDefaultMapper() throws IOException { + final IndexService indexService = + createIndex("test", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0).build()); + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + { + builder.startObject(MapperService.DEFAULT_MAPPING); + { + builder.field("date_detection", false); + } + builder.endObject(); + } + builder.endObject(); + final PutMappingRequest putMappingRequest = new PutMappingRequest(); + putMappingRequest.indices("test"); + putMappingRequest.type(MapperService.DEFAULT_MAPPING); + putMappingRequest.source(builder); + client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(builder).get(); + } + assertNotNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING)); + final Settings zeroReplicasSettings = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build(); + client().admin().indices().prepareUpdateSettings("test").setSettings(zeroReplicasSettings).get(); + /* + * This assertion is a guard against a previous bug that would lose the default mapper when applying a metadata update that did not + * update the default mapping. + */ + assertNotNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING)); + } + public void testTotalFieldsExceedsLimit() throws Throwable { Function mapping = type -> { try { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java index 54418850e5d4f..00068f76e753d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java @@ -443,4 +443,22 @@ public void testSerializeDefaults() throws Exception { } } + public void testIllegalFormatField() throws Exception { + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "date_range") + .array("format", "test_format") + .endObject() + .endObject() + .endObject() + .endObject()); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> parser.parse("type", new CompressedXContent(mapping))); + assertEquals("Invalid format: [[test_format]]: expected string value", e.getMessage()); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java index ec21a1f7286a4..574d4eee70a00 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java @@ -159,4 +159,30 @@ public void testDynamicTemplates() throws Exception { mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); assertEquals(mapping3, mapper.mappingSource().toString()); } + + public void testIllegalFormatField() throws Exception { + String dynamicMapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startArray("dynamic_date_formats") + .startArray().value("test_format").endArray() + .endArray() + .endObject() + .endObject()); + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("type") 
+ .startArray("date_formats") + .startArray().value("test_format").endArray() + .endArray() + .endObject() + .endObject()); + + DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); + for (String m : Arrays.asList(mapping, dynamicMapping)) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> parser.parse("type", new CompressedXContent(m))); + assertEquals("Invalid format: [[test_format]]: expected string value", e.getMessage()); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java index 0af663219903f..3bec98d33eec7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java @@ -61,7 +61,7 @@ public void testDocValuesSingleType() throws Exception { public void testDocValues(boolean singleType) throws IOException { Settings indexSettings = singleType ? Settings.EMPTY : Settings.builder() - .put("index.version.created", Version.V_5_6_0) + .put("index.version.created", Version.V_6_0_0) .build(); MapperService mapperService = createIndex("test", indexSettings).mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java index 3f8e8e9efec39..d8650331d2323 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.mapper; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; @@ -30,6 +32,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.Collection; @@ -188,4 +191,30 @@ public void testRejectFieldDefinedTwice() throws IOException { () -> mapperService2.merge("type", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), equalTo("mapper [foo] of different type, current_type [long], merged_type [ObjectMapper]")); } + + public void testMappingVersion() { + createIndex("test", client().admin().indices().prepareCreate("test").addMapping("type")); + final ClusterService clusterService = getInstanceFromNode(ClusterService.class); + { + final long previousVersion = clusterService.state().metaData().index("test").getMappingVersion(); + final PutMappingRequest request = new PutMappingRequest(); + request.indices("test"); + request.type("type"); + request.source("field", "type=text"); + client().admin().indices().putMapping(request).actionGet(); + assertThat(clusterService.state().metaData().index("test").getMappingVersion(), Matchers.equalTo(1 + previousVersion)); + } + + { + final long previousVersion = clusterService.state().metaData().index("test").getMappingVersion(); + final PutMappingRequest request = new PutMappingRequest(); + request.indices("test"); 
+ request.type("type"); + request.source("field", "type=text"); + client().admin().indices().putMapping(request).actionGet(); + // the version should be unchanged after putting the same mapping again + assertThat(clusterService.state().metaData().index("test").getMappingVersion(), Matchers.equalTo(previousVersion)); + } + } + } diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index 0de9cac885502..496d8512d4e28 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -30,7 +30,6 @@ import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -366,9 +365,6 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws public void testMatchPhrasePrefixWithBoost() throws Exception { QueryShardContext context = createShardContext(); - assumeTrue("test runs only when the index version is on or after V_5_0_0_alpha1", - context.indexVersionCreated().onOrAfter(Version.V_5_0_0_alpha1)); - { // field boost is applied on a single term query MatchPhrasePrefixQueryBuilder builder = new MatchPhrasePrefixQueryBuilder("string_boost", "foo"); diff --git a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java index 6ac97373dfa1a..72898dd3911cd 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.termvectors.MultiTermVectorsItemResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; @@ -36,13 +35,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lucene.search.MoreLikeThisQuery; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item; @@ -52,7 +49,6 @@ import java.io.IOException; import java.util.Arrays; -import java.util.Base64; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; @@ -333,26 +329,6 @@ public void testItemFromXContent() throws IOException { assertEquals(expectedItem, newItem); } - public void testItemSerializationBwc() throws IOException { - final byte[] data = 
Base64.getDecoder().decode("AQVpbmRleAEEdHlwZQEODXsiZm9vIjoiYmFyIn0A/wD//////////QAAAAAAAAAA"); - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - Item item = new Item(in); - assertEquals(XContentType.JSON, item.xContentType()); - assertEquals("{\"foo\":\"bar\"}", item.doc().utf8ToString()); - assertEquals("index", item.index()); - assertEquals("type", item.type()); - - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(version); - item.writeTo(out); - assertArrayEquals(data, out.bytes().toBytesRef().bytes); - } - } - } - @Override protected boolean isCachable(MoreLikeThisQueryBuilder queryBuilder) { return queryBuilder.likeItems().length == 0; // items are always fetched diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index e30cdaca40204..2f69ef7674d4f 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -21,6 +21,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.queries.ExtendedCommonTermsQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; @@ -381,6 +382,69 @@ public void testDefaultField() throws Exception { assertEquals(expected, query); } + public void testWithStopWords() throws Exception { + Query query = new MultiMatchQueryBuilder("the quick fox") + .field(STRING_FIELD_NAME) + .analyzer("stop") + .toQuery(createShardContext()); + Query expected = new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "fox")), BooleanClause.Occur.SHOULD) + .build(); + assertEquals(expected, query); + + query = new MultiMatchQueryBuilder("the quick fox") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + .toQuery(createShardContext()); + expected = new DisjunctionMaxQuery( + Arrays.asList( + new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "fox")), BooleanClause.Occur.SHOULD) + .build(), + new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME_2, "quick")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME_2, "fox")), BooleanClause.Occur.SHOULD) + .build() + ), 0f); + assertEquals(expected, query); + + query = new MultiMatchQueryBuilder("the") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + .toQuery(createShardContext()); + expected = new DisjunctionMaxQuery(Arrays.asList(new MatchNoDocsQuery(), new MatchNoDocsQuery()), 0f); + assertEquals(expected, query); + + query = new BoolQueryBuilder() + .should( + new MultiMatchQueryBuilder("the") + .field(STRING_FIELD_NAME) + .analyzer("stop") + ) + .toQuery(createShardContext()); + expected = new BooleanQuery.Builder() + .add(new MatchNoDocsQuery(), BooleanClause.Occur.SHOULD) + .build(); + assertEquals(expected, query); + + query = new BoolQueryBuilder() + .should( + new MultiMatchQueryBuilder("the") + 
.field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + ) + .toQuery(createShardContext()); + expected = new BooleanQuery.Builder() + .add(new DisjunctionMaxQuery(Arrays.asList(new MatchNoDocsQuery(), new MatchNoDocsQuery()), 0f), BooleanClause.Occur.SHOULD) + .build(); + assertEquals(expected, query); + } + private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) { Settings build = Settings.builder().put(oldIndexSettings) .put(indexSettings) diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index a2e6018d0ef6b..76479791283b4 100644 --- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -124,10 +124,6 @@ protected void doAssertLuceneQuery(NestedQueryBuilder queryBuilder, Query query, public void testSerializationBWC() throws IOException { for (Version version : VersionUtils.allReleasedVersions()) { NestedQueryBuilder testQuery = createTestQueryBuilder(); - if (version.before(Version.V_5_2_0) && testQuery.innerHit() != null) { - // ignore unmapped for inner_hits has been added on 5.2 - testQuery.innerHit().setIgnoreUnmapped(false); - } assertSerialization(testQuery, version); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 87197b662d142..b0ee32548737a 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -998,6 +998,18 @@ public void testExistsFieldQuery() throws Exception { } else { assertThat(query, equalTo(new ConstantScoreQuery(new TermQuery(new Term("_field_names", STRING_FIELD_NAME))))); } + + for (boolean quoted : new boolean[] {true, false}) { + String value = (quoted ? "\"" : "") + STRING_FIELD_NAME + (quoted ? 
"\"" : ""); + queryBuilder = new QueryStringQueryBuilder("_exists_:" + value); + query = queryBuilder.toQuery(context); + if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) + && (context.fieldMapper(STRING_FIELD_NAME).omitNorms() == false)) { + assertThat(query, equalTo(new ConstantScoreQuery(new NormsFieldExistsQuery(STRING_FIELD_NAME)))); + } else { + assertThat(query, equalTo(new ConstantScoreQuery(new TermQuery(new Term("_field_names", STRING_FIELD_NAME))))); + } + } QueryShardContext contextNoType = createShardContextWithNoType(); query = queryBuilder.toQuery(contextNoType); assertThat(query, equalTo(new MatchNoDocsQuery())); @@ -1266,11 +1278,58 @@ public void testWithStopWords() throws Exception { .field(STRING_FIELD_NAME) .analyzer("stop") .toQuery(createShardContext()); - BooleanQuery expected = new BooleanQuery.Builder() - .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), Occur.SHOULD) - .add(new TermQuery(new Term(STRING_FIELD_NAME, "fox")), Occur.SHOULD) + Query expected = new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "fox")), BooleanClause.Occur.SHOULD) + .build(); + assertEquals(expected, query); + + query = new QueryStringQueryBuilder("the quick fox") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + .toQuery(createShardContext()); + expected = new DisjunctionMaxQuery( + Arrays.asList( + new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "fox")), Occur.SHOULD) + .build(), + new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME_2, "quick")), Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME_2, "fox")), Occur.SHOULD) + .build() + ), 0f); + assertEquals(expected, query); + + query = new QueryStringQueryBuilder("the") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + .toQuery(createShardContext()); + assertEquals(new BooleanQuery.Builder().build(), query); + + query = new BoolQueryBuilder() + .should( + new QueryStringQueryBuilder("the") + .field(STRING_FIELD_NAME) + .analyzer("stop") + ) + .toQuery(createShardContext()); + expected = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder().build(), BooleanClause.Occur.SHOULD) .build(); assertEquals(expected, query); + + query = new BoolQueryBuilder() + .should( + new QueryStringQueryBuilder("the") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + ) + .toQuery(createShardContext()); + assertEquals(expected, query); } public void testWithPrefixStopWords() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index 6cde10308c6e7..36da37c44c66f 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -613,11 +613,59 @@ public void testWithStopWords() throws Exception { .field(STRING_FIELD_NAME) .analyzer("stop") .toQuery(createShardContext()); - BooleanQuery expected = new BooleanQuery.Builder() + Query expected = new BooleanQuery.Builder() .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), BooleanClause.Occur.SHOULD) .add(new TermQuery(new Term(STRING_FIELD_NAME, 
"fox")), BooleanClause.Occur.SHOULD) .build(); assertEquals(expected, query); + + query = new SimpleQueryStringBuilder("the quick fox") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + .toQuery(createShardContext()); + expected = new BooleanQuery.Builder() + .add(new DisjunctionMaxQuery( + Arrays.asList( + new TermQuery(new Term(STRING_FIELD_NAME, "quick")), + new TermQuery(new Term(STRING_FIELD_NAME_2, "quick")) + ), 1.0f), BooleanClause.Occur.SHOULD) + .add(new DisjunctionMaxQuery( + Arrays.asList( + new TermQuery(new Term(STRING_FIELD_NAME, "fox")), + new TermQuery(new Term(STRING_FIELD_NAME_2, "fox")) + ), 1.0f), BooleanClause.Occur.SHOULD) + .build(); + assertEquals(expected, query); + + query = new SimpleQueryStringBuilder("the") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + .toQuery(createShardContext()); + assertEquals(new MatchNoDocsQuery(), query); + + query = new BoolQueryBuilder() + .should( + new SimpleQueryStringBuilder("the") + .field(STRING_FIELD_NAME) + .analyzer("stop") + ) + .toQuery(createShardContext()); + expected = new BooleanQuery.Builder() + .add(new MatchNoDocsQuery(), BooleanClause.Occur.SHOULD) + .build(); + assertEquals(expected, query); + + query = new BoolQueryBuilder() + .should( + new SimpleQueryStringBuilder("the") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + ) + .toQuery(createShardContext()); + assertEquals(expected, query); } public void testWithPrefixStopWords() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java index a288328391a9d..71aab8ca9f9f6 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java @@ -24,17 +24,26 @@ import org.elasticsearch.action.bulk.BulkItemResponse.Failure; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.index.reindex.BulkByScrollTask.Status; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.apache.lucene.util.TestUtil.randomSimpleString; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; +import static org.hamcrest.Matchers.containsString; -public class BulkByScrollResponseTests extends ESTestCase { +public class BulkByScrollResponseTests extends AbstractXContentTestCase { + + private boolean includeUpdated; + private boolean includeCreated; public void testRountTrip() throws IOException { BulkByScrollResponse response = new BulkByScrollResponse(timeValueMillis(randomNonNegativeLong()), @@ -94,4 +103,73 @@ private void assertResponseEquals(BulkByScrollResponse expected, BulkByScrollRes assertEquals(expectedFailure.getReason().getMessage(), actualFailure.getReason().getMessage()); } } + + public static void assertEqualBulkResponse(BulkByScrollResponse expected, BulkByScrollResponse actual, + boolean includeUpdated, boolean includeCreated) { + 
assertEquals(expected.getTook(), actual.getTook()); + BulkByScrollTaskStatusTests + .assertEqualStatus(expected.getStatus(), actual.getStatus(), includeUpdated, includeCreated); + assertEquals(expected.getBulkFailures().size(), actual.getBulkFailures().size()); + for (int i = 0; i < expected.getBulkFailures().size(); i++) { + Failure expectedFailure = expected.getBulkFailures().get(i); + Failure actualFailure = actual.getBulkFailures().get(i); + assertEquals(expectedFailure.getIndex(), actualFailure.getIndex()); + assertEquals(expectedFailure.getType(), actualFailure.getType()); + assertEquals(expectedFailure.getId(), actualFailure.getId()); + assertThat(expectedFailure.getMessage(), containsString(actualFailure.getMessage())); + assertEquals(expectedFailure.getStatus(), actualFailure.getStatus()); + } + assertEquals(expected.getSearchFailures().size(), actual.getSearchFailures().size()); + for (int i = 0; i < expected.getSearchFailures().size(); i++) { + ScrollableHitSource.SearchFailure expectedFailure = expected.getSearchFailures().get(i); + ScrollableHitSource.SearchFailure actualFailure = actual.getSearchFailures().get(i); + assertEquals(expectedFailure.getIndex(), actualFailure.getIndex()); + assertEquals(expectedFailure.getShardId(), actualFailure.getShardId()); + assertEquals(expectedFailure.getNodeId(), actualFailure.getNodeId()); + assertThat(expectedFailure.getReason().getMessage(), containsString(actualFailure.getReason().getMessage())); + } + } + + @Override + protected void assertEqualInstances(BulkByScrollResponse expected, BulkByScrollResponse actual) { + assertEqualBulkResponse(expected, actual, includeUpdated, includeCreated); + } + + @Override + protected BulkByScrollResponse createTestInstance() { + // failures are tested separately, so we can test XContent equivalence at least when we have no failures + return + new BulkByScrollResponse( + timeValueMillis(randomNonNegativeLong()), BulkByScrollTaskStatusTests.randomStatusWithoutException(), + emptyList(), emptyList(), randomBoolean() + ); + } + + @Override + protected BulkByScrollResponse doParseInstance(XContentParser parser) throws IOException { + return BulkByScrollResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected ToXContent.Params getToXContentParams() { + Map params = new HashMap<>(); + if (randomBoolean()) { + includeUpdated = false; + params.put(Status.INCLUDE_UPDATED, "false"); + } else { + includeUpdated = true; + } + if (randomBoolean()) { + includeCreated = false; + params.put(Status.INCLUDE_CREATED, "false"); + } else { + includeCreated = true; + } + return new ToXContent.MapParams(params); + } } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java new file mode 100644 index 0000000000000..0d84b0e1412b1 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.index.reindex.BulkByScrollTask.StatusOrException; + +import java.io.IOException; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.containsString; + +public class BulkByScrollTaskStatusOrExceptionTests extends AbstractXContentTestCase { + @Override + protected StatusOrException createTestInstance() { + // failures are tested separately, so we can test XContent equivalence at least when we have no failures + return createTestInstanceWithoutExceptions(); + } + + static StatusOrException createTestInstanceWithoutExceptions() { + return new StatusOrException(BulkByScrollTaskStatusTests.randomStatusWithoutException()); + } + + static StatusOrException createTestInstanceWithExceptions() { + if (randomBoolean()) { + return new StatusOrException(new ElasticsearchException("test_exception")); + } else { + return new StatusOrException(BulkByScrollTaskStatusTests.randomStatus()); + } + } + + @Override + protected StatusOrException doParseInstance(XContentParser parser) throws IOException { + return StatusOrException.fromXContent(parser); + } + + public static void assertEqualStatusOrException(StatusOrException expected, StatusOrException actual, + boolean includeUpdated, boolean includeCreated) { + if (expected != null && actual != null) { + assertNotSame(expected, actual); + if (expected.getException() == null) { + BulkByScrollTaskStatusTests + // we test includeCreated params in the Status tests + .assertEqualStatus(expected.getStatus(), actual.getStatus(), includeUpdated, includeCreated); + } else { + assertThat( + actual.getException().getMessage(), + containsString(expected.getException().getMessage()) + ); + } + } else { + // If one of them is null both of them should be null + assertSame(expected, actual); + } + } + + @Override + protected void assertEqualInstances(StatusOrException expected, StatusOrException actual) { + assertEqualStatusOrException(expected, actual, true, true); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + /** + * Test parsing {@link StatusOrException} with inner failures as they don't support asserting on xcontent equivalence, given that + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier instanceSupplier = BulkByScrollTaskStatusOrExceptionTests::createTestInstanceWithExceptions; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break. 
+ boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, Strings.EMPTY_ARRAY, + getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, ToXContent.EMPTY_PARAMS); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java index 9e5383a259adc..13db9f4766e79 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java @@ -23,37 +23,39 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; import org.hamcrest.Matchers; +import org.elasticsearch.index.reindex.BulkByScrollTask.Status; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; import java.util.stream.IntStream; import static java.lang.Math.abs; -import static java.util.Collections.emptyList; import static java.util.stream.Collectors.toList; import static org.apache.lucene.util.TestUtil.randomSimpleString; -import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; +import static org.hamcrest.Matchers.equalTo; + +public class BulkByScrollTaskStatusTests extends AbstractXContentTestCase { + + private boolean includeUpdated; + private boolean includeCreated; -public class BulkByScrollTaskStatusTests extends ESTestCase { public void testBulkByTaskStatus() throws IOException { BulkByScrollTask.Status status = randomStatus(); BytesStreamOutput out = new BytesStreamOutput(); status.writeTo(out); BulkByScrollTask.Status tripped = new BulkByScrollTask.Status(out.bytes().streamInput()); assertTaskStatusEquals(out.getVersion(), status, tripped); - - // Also check round tripping pre-5.1 which is the first version to support parallelized scroll - out = new BytesStreamOutput(); - out.setVersion(Version.V_5_0_0_rc1); // This can be V_5_0_0 - status.writeTo(out); - StreamInput in = out.bytes().streamInput(); - in.setVersion(Version.V_5_0_0_rc1); - tripped = new BulkByScrollTask.Status(in); - assertTaskStatusEquals(Version.V_5_0_0_rc1, status, tripped); } /** @@ -74,23 +76,19 @@ public static void assertTaskStatusEquals(Version version, BulkByScrollTask.Stat assertEquals(expected.getRequestsPerSecond(), actual.getRequestsPerSecond(), 0f); assertEquals(expected.getReasonCancelled(), actual.getReasonCancelled()); assertEquals(expected.getThrottledUntil(), actual.getThrottledUntil()); - if (version.onOrAfter(Version.V_5_1_1)) { - assertThat(actual.getSliceStatuses(), Matchers.hasSize(expected.getSliceStatuses().size())); - for (int i = 0; i < expected.getSliceStatuses().size(); i++) { - 
BulkByScrollTask.StatusOrException sliceStatus = expected.getSliceStatuses().get(i); - if (sliceStatus == null) { - assertNull(actual.getSliceStatuses().get(i)); - } else if (sliceStatus.getException() == null) { - assertNull(actual.getSliceStatuses().get(i).getException()); - assertTaskStatusEquals(version, sliceStatus.getStatus(), actual.getSliceStatuses().get(i).getStatus()); - } else { - assertNull(actual.getSliceStatuses().get(i).getStatus()); - // Just check the message because we're not testing exception serialization in general here. - assertEquals(sliceStatus.getException().getMessage(), actual.getSliceStatuses().get(i).getException().getMessage()); - } + assertThat(actual.getSliceStatuses(), Matchers.hasSize(expected.getSliceStatuses().size())); + for (int i = 0; i < expected.getSliceStatuses().size(); i++) { + BulkByScrollTask.StatusOrException sliceStatus = expected.getSliceStatuses().get(i); + if (sliceStatus == null) { + assertNull(actual.getSliceStatuses().get(i)); + } else if (sliceStatus.getException() == null) { + assertNull(actual.getSliceStatuses().get(i).getException()); + assertTaskStatusEquals(version, sliceStatus.getStatus(), actual.getSliceStatuses().get(i).getStatus()); + } else { + assertNull(actual.getSliceStatuses().get(i).getStatus()); + // Just check the message because we're not testing exception serialization in general here. + assertEquals(sliceStatus.getException().getMessage(), actual.getSliceStatuses().get(i).getException().getMessage()); } - } else { - assertEquals(emptyList(), actual.getSliceStatuses()); } } @@ -113,6 +111,22 @@ public static BulkByScrollTask.Status randomStatus() { return new BulkByScrollTask.Status(statuses, randomBoolean() ? "test" : null); } + public static BulkByScrollTask.Status randomStatusWithoutException() { + if (randomBoolean()) { + return randomWorkingStatus(null); + } + boolean canHaveNullStatues = randomBoolean(); + List statuses = IntStream.range(0, between(0, 10)) + .mapToObj(i -> { + if (canHaveNullStatues && LuceneTestCase.rarely()) { + return null; + } + return new BulkByScrollTask.StatusOrException(randomWorkingStatus(i)); + }) + .collect(toList()); + return new BulkByScrollTask.Status(statuses, randomBoolean() ? "test" : null); + } + private static BulkByScrollTask.Status randomWorkingStatus(Integer sliceId) { // These all should be believably small because we sum them if we have multiple workers int total = between(0, 10000000); @@ -124,8 +138,83 @@ private static BulkByScrollTask.Status randomWorkingStatus(Integer sliceId) { long versionConflicts = between(0, total); long bulkRetries = between(0, 10000000); long searchRetries = between(0, 100000); - return new BulkByScrollTask.Status(sliceId, total, updated, created, deleted, batches, versionConflicts, noops, bulkRetries, - searchRetries, parseTimeValue(randomPositiveTimeValue(), "test"), abs(Randomness.get().nextFloat()), - randomBoolean() ? 
null : randomSimpleString(Randomness.get()), parseTimeValue(randomPositiveTimeValue(), "test")); + // smallest unit of time during toXContent is Milliseconds + TimeUnit[] timeUnits = {TimeUnit.MILLISECONDS, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS}; + TimeValue throttled = new TimeValue(randomIntBetween(0, 1000), randomFrom(timeUnits)); + TimeValue throttledUntil = new TimeValue(randomIntBetween(0, 1000), randomFrom(timeUnits)); + return + new BulkByScrollTask.Status( + sliceId, total, updated, created, deleted, batches, versionConflicts, noops, + bulkRetries, searchRetries, throttled, abs(Randomness.get().nextFloat()), + randomBoolean() ? null : randomSimpleString(Randomness.get()), throttledUntil + ); + } + + public static void assertEqualStatus(BulkByScrollTask.Status expected, BulkByScrollTask.Status actual, + boolean includeUpdated, boolean includeCreated) { + assertNotSame(expected, actual); + assertTrue(expected.equalsWithoutSliceStatus(actual, includeUpdated, includeCreated)); + assertThat(expected.getSliceStatuses().size(), equalTo(actual.getSliceStatuses().size())); + for (int i = 0; i< expected.getSliceStatuses().size(); i++) { + BulkByScrollTaskStatusOrExceptionTests.assertEqualStatusOrException( + expected.getSliceStatuses().get(i), actual.getSliceStatuses().get(i), includeUpdated, includeCreated + ); + } + } + + @Override + protected void assertEqualInstances(BulkByScrollTask.Status first, BulkByScrollTask.Status second) { + assertEqualStatus(first, second, includeUpdated, includeCreated); + } + + @Override + protected BulkByScrollTask.Status createTestInstance() { + // failures are tested separately, so we can test xcontent equivalence at least when we have no failures + return randomStatusWithoutException(); + } + + @Override + protected BulkByScrollTask.Status doParseInstance(XContentParser parser) throws IOException { + return BulkByScrollTask.Status.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + /** + * Test parsing {@link Status} with inner failures as they don't support asserting on xcontent equivalence, given that + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier instanceSupplier = BulkByScrollTaskStatusTests::randomStatus; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break. 
+ boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, Strings.EMPTY_ARRAY, + getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, ToXContent.EMPTY_PARAMS); + } + + @Override + protected ToXContent.Params getToXContentParams() { + Map params = new HashMap<>(); + if (randomBoolean()) { + includeUpdated = false; + params.put(Status.INCLUDE_UPDATED, "false"); + } else { + includeUpdated = true; + } + if (randomBoolean()) { + includeCreated = false; + params.put(Status.INCLUDE_CREATED, "false"); + } else { + includeCreated = true; + } + return new ToXContent.MapParams(params); } } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java b/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java index 6c1988a1440e9..1c3d539263e7d 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java @@ -20,8 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.search.slice.SliceBuilder; @@ -89,9 +87,9 @@ protected void extraForSliceAssertions(ReindexRequest original, ReindexRequest f @Override protected ReindexRequest newRequest() { - ReindexRequest reindex = new ReindexRequest(new SearchRequest(), new IndexRequest()); - reindex.getSearchRequest().indices("source"); - reindex.getDestination().index("dest"); + ReindexRequest reindex = new ReindexRequest(); + reindex.setSourceIndices("source"); + reindex.setDestIndex("dest"); return reindex; } } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java b/server/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java index b30968cf056b5..47449eb739199 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import static org.apache.lucene.util.TestUtil.randomSimpleString; @@ -32,11 +31,11 @@ public void testUpdateByQueryRequestImplementsIndicesRequestReplaceable() { indices[i] = randomSimpleString(random(), 1, 30); } - SearchRequest searchRequest = new SearchRequest(indices); IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); - searchRequest.indicesOptions(indicesOptions); - UpdateByQueryRequest request = new UpdateByQueryRequest(searchRequest); + UpdateByQueryRequest request = new UpdateByQueryRequest(); + request.indices(indices); + request.setIndicesOptions(indicesOptions); for (int i = 0; i < numIndices; i++) { assertEquals(indices[i], request.indices()[i]); } @@ -60,7 +59,7 @@ public void testUpdateByQueryRequestImplementsIndicesRequestReplaceable() { @Override protected UpdateByQueryRequest newRequest() { - return new 
UpdateByQueryRequest(new SearchRequest(randomAlphaOfLength(5))); + return new UpdateByQueryRequest(randomAlphaOfLength(5)); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 5ea717873143c..fba71dd1e5296 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.index.replication; +import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; @@ -36,9 +37,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineFactory; -import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.InternalEngineTests; import org.elasticsearch.index.engine.SegmentsStats; @@ -49,6 +48,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTests; import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.SnapshotMatchers; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.recovery.RecoveryTarget; import org.elasticsearch.threadpool.TestThreadPool; @@ -56,6 +56,7 @@ import org.hamcrest.Matcher; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -342,56 +343,89 @@ public void testReplicaOperationWithConcurrentPrimaryPromotion() throws Exceptio * for primary and replica shards */ public void testDocumentFailureReplication() throws Exception { - String failureMessage = "simulated document failure"; - final EngineFactory failIndexingOpsEngine = new EngineFactory() { - @Override - public Engine newReadWriteEngine(EngineConfig config) { - return EngineTestCase.createInternalEngine((directory, writerConfig) -> - new IndexWriter(directory, writerConfig) { - @Override - public long addDocument(Iterable doc) throws IOException { - boolean isTombstone = false; - for (IndexableField field : doc) { - if (SeqNoFieldMapper.TOMBSTONE_NAME.equals(field.name())) { - isTombstone = true; - } - } - if (isTombstone) { - return super.addDocument(doc); - } else { - throw new IOException(failureMessage); - } + final IOException indexException = new IOException("simulated indexing failure"); + final IOException deleteException = new IOException("simulated deleting failure"); + final EngineFactory engineFactory = config -> InternalEngineTests.createInternalEngine((dir, iwc) -> + new IndexWriter(dir, iwc) { + final AtomicBoolean throwAfterIndexedOneDoc = new AtomicBoolean(); // need one document to trigger delete in IW. 
+ @Override + public long addDocument(Iterable doc) throws IOException { + boolean isTombstone = false; + for (IndexableField field : doc) { + if (SeqNoFieldMapper.TOMBSTONE_NAME.equals(field.name())) { + isTombstone = true; } - }, null, null, config); - } - }; + } + if (isTombstone == false && throwAfterIndexedOneDoc.getAndSet(true)) { + throw indexException; + } else { + return super.addDocument(doc); + } + } + @Override + public long deleteDocuments(Term... terms) throws IOException { + throw deleteException; + } + @Override + public long softUpdateDocument(Term term, Iterable doc, Field...fields) throws IOException { + throw deleteException; // a delete uses softUpdateDocument API if soft-deletes enabled + } + }, null, null, config); try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(0)) { @Override - protected EngineFactory getEngineFactory(ShardRouting routing) { return failIndexingOpsEngine; }}) { + protected EngineFactory getEngineFactory(ShardRouting routing) { return engineFactory; }}) { - // test only primary + // start with the primary only so two first failures are replicated to replicas via recovery from the translog of the primary. shards.startPrimary(); - BulkItemResponse response = shards.index( - new IndexRequest(index.getName(), "type", "1") - .source("{}", XContentType.JSON) - ); - assertTrue(response.isFailed()); - assertNoOpTranslogOperationForDocumentFailure(shards, 1, shards.getPrimary().getPendingPrimaryTerm(), failureMessage); - shards.assertAllEqual(0); + long primaryTerm = shards.getPrimary().getPendingPrimaryTerm(); + List expectedTranslogOps = new ArrayList<>(); + BulkItemResponse indexResp = shards.index(new IndexRequest(index.getName(), "type", "1").source("{}", XContentType.JSON)); + assertThat(indexResp.isFailed(), equalTo(false)); + expectedTranslogOps.add(new Translog.Index("type", "1", 0, primaryTerm, 1, "{}".getBytes(StandardCharsets.UTF_8), null, -1)); + try (Translog.Snapshot snapshot = getTranslog(shards.getPrimary()).newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } + + indexResp = shards.index(new IndexRequest(index.getName(), "type", "any").source("{}", XContentType.JSON)); + assertThat(indexResp.getFailure().getCause(), equalTo(indexException)); + expectedTranslogOps.add(new Translog.NoOp(1, primaryTerm, indexException.toString())); + + BulkItemResponse deleteResp = shards.delete(new DeleteRequest(index.getName(), "type", "1")); + assertThat(deleteResp.getFailure().getCause(), equalTo(deleteException)); + expectedTranslogOps.add(new Translog.NoOp(2, primaryTerm, deleteException.toString())); + shards.assertAllEqual(1); - // add some replicas int nReplica = randomIntBetween(1, 3); for (int i = 0; i < nReplica; i++) { shards.addReplica(); } shards.startReplicas(nReplica); - response = shards.index( - new IndexRequest(index.getName(), "type", "1") - .source("{}", XContentType.JSON) - ); - assertTrue(response.isFailed()); - assertNoOpTranslogOperationForDocumentFailure(shards, 2, shards.getPrimary().getPendingPrimaryTerm(), failureMessage); - shards.assertAllEqual(0); + for (IndexShard shard : shards) { + try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } + try (Translog.Snapshot snapshot = shard.getHistoryOperations("test", 0)) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } + } + // unlike previous 
failures, these two failures replicated directly from the replication channel. + indexResp = shards.index(new IndexRequest(index.getName(), "type", "any").source("{}", XContentType.JSON)); + assertThat(indexResp.getFailure().getCause(), equalTo(indexException)); + expectedTranslogOps.add(new Translog.NoOp(3, primaryTerm, indexException.toString())); + + deleteResp = shards.delete(new DeleteRequest(index.getName(), "type", "1")); + assertThat(deleteResp.getFailure().getCause(), equalTo(deleteException)); + expectedTranslogOps.add(new Translog.NoOp(4, primaryTerm, deleteException.toString())); + + for (IndexShard shard : shards) { + try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } + try (Translog.Snapshot snapshot = shard.getHistoryOperations("test", 0)) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } + } + shards.assertAllEqual(1); } } @@ -564,26 +598,4 @@ public void testOutOfOrderDeliveryForAppendOnlyOperations() throws Exception { shards.assertAllEqual(0); } } - - private static void assertNoOpTranslogOperationForDocumentFailure( - Iterable replicationGroup, - int expectedOperation, - long expectedPrimaryTerm, - String failureMessage) throws IOException { - for (IndexShard indexShard : replicationGroup) { - try(Translog.Snapshot snapshot = getTranslog(indexShard).newSnapshot()) { - assertThat(snapshot.totalOperations(), equalTo(expectedOperation)); - long expectedSeqNo = 0L; - Translog.Operation op = snapshot.next(); - do { - assertThat(op.opType(), equalTo(Translog.Operation.Type.NO_OP)); - assertThat(op.seqNo(), equalTo(expectedSeqNo)); - assertThat(op.primaryTerm(), equalTo(expectedPrimaryTerm)); - assertThat(((Translog.NoOp) op).reason(), containsString(failureMessage)); - op = snapshot.next(); - expectedSeqNo++; - } while (op != null); - } - } - } } diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index ba2345af4f70b..28122665e9bb6 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -219,8 +219,7 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception { @TestLogging("org.elasticsearch.index.shard:TRACE,org.elasticsearch.indices.recovery:TRACE") public void testRecoveryAfterPrimaryPromotion() throws Exception { - Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build(); - try (ReplicationGroup shards = createGroup(2, settings)) { + try (ReplicationGroup shards = createGroup(2)) { shards.startAll(); int totalDocs = shards.indexDocs(randomInt(10)); int committedDocs = 0; @@ -232,7 +231,6 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { final IndexShard oldPrimary = shards.getPrimary(); final IndexShard newPrimary = shards.getReplicas().get(0); final IndexShard replica = shards.getReplicas().get(1); - boolean softDeleteEnabled = replica.indexSettings().isSoftDeleteEnabled(); if (randomBoolean()) { // simulate docs that were inflight when primary failed, these will be rolled back final int rollbackDocs = randomIntBetween(1, 5); @@ -280,12 +278,13 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { 
assertThat(newPrimary.getLastSyncedGlobalCheckpoint(), equalTo(newPrimary.seqNoStats().getMaxSeqNo())); }); newPrimary.flush(new FlushRequest().force(true)); - uncommittedOpsOnPrimary = shards.indexDocs(randomIntBetween(0, 10)); - totalDocs += uncommittedOpsOnPrimary; - // we need an extra flush or refresh to advance the min_retained_seqno on the new primary so that ops-based won't happen - if (softDeleteEnabled) { + if (replica.indexSettings().isSoftDeleteEnabled()) { + // We need an extra flush to advance the min_retained_seqno on the new primary so ops-based won't happen. + // The min_retained_seqno only advances when a merge asks for the retention query. newPrimary.flush(new FlushRequest().force(true)); } + uncommittedOpsOnPrimary = shards.indexDocs(randomIntBetween(0, 10)); + totalDocs += uncommittedOpsOnPrimary; } if (randomBoolean()) { @@ -305,8 +304,7 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(totalDocs - committedDocs)); } else { assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); - int expectOps = softDeleteEnabled ? totalDocs : uncommittedOpsOnPrimary; - assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(expectOps)); + assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(uncommittedOpsOnPrimary)); } // roll back the extra ops in the replica diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 5091af5a5401e..584ed7085a8ae 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; @@ -118,6 +119,7 @@ import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotShardFailure; +import org.elasticsearch.test.CorruptionUtils; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.FieldMaskingReader; import org.elasticsearch.test.VersionUtils; @@ -126,7 +128,11 @@ import java.io.IOException; import java.nio.charset.Charset; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -1239,7 +1245,7 @@ public String[] listAll() throws IOException { }; try (Store store = createStore(shardId, new IndexSettings(metaData, Settings.EMPTY), directory)) { - IndexShard shard = newShard(shardRouting, shardPath, metaData, store, + IndexShard shard = newShard(shardRouting, shardPath, metaData, i -> store, null, new InternalEngineFactory(), () -> { }, EMPTY_EVENT_LISTENER); AtomicBoolean failureCallbackTriggered = new AtomicBoolean(false); @@ -2400,8 +2406,7 @@ public void testRecoverFromLocalShard() throws IOException { closeShards(sourceShard, targetShard); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32766") - public void 
testDocStats() throws IOException { + public void testDocStats() throws IOException, InterruptedException { IndexShard indexShard = null; try { indexShard = newStartedShard( @@ -2460,15 +2465,6 @@ public void testDocStats() throws IOException { assertTrue(searcher.reader().numDocs() <= docStats.getCount()); } assertThat(docStats.getCount(), equalTo(numDocs)); - // Lucene will delete a segment if all docs are deleted from it; - // this means that we lose the deletes when deleting all docs. - // If soft-delete is enabled, each delete op will add a deletion marker. - final long deleteTombstones = indexShard.indexSettings.isSoftDeleteEnabled() ? numDocsToDelete : 0L; - if (numDocsToDelete == numDocs) { - assertThat(docStats.getDeleted(), equalTo(deleteTombstones)); - } else { - assertThat(docStats.getDeleted(), equalTo(numDocsToDelete + deleteTombstones)); - } } // merge them away @@ -2600,6 +2596,143 @@ public void testReadSnapshotConcurrently() throws IOException, InterruptedExcept closeShards(newShard); } + public void testIndexCheckOnStartup() throws Exception { + final IndexShard indexShard = newStartedShard(true); + + final long numDocs = between(10, 100); + for (long i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Long.toString(i), "{}"); + } + indexShard.flush(new FlushRequest()); + closeShards(indexShard); + + final ShardPath shardPath = indexShard.shardPath(); + + final Path indexPath = corruptIndexFile(shardPath); + + final AtomicInteger corruptedMarkerCount = new AtomicInteger(); + final SimpleFileVisitor corruptedVisitor = new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (Files.isRegularFile(file) && file.getFileName().toString().startsWith(Store.CORRUPTED)) { + corruptedMarkerCount.incrementAndGet(); + } + return FileVisitResult.CONTINUE; + } + }; + Files.walkFileTree(indexPath, corruptedVisitor); + + assertThat("corruption marker should not be there", corruptedMarkerCount.get(), equalTo(0)); + + final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), + RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE + ); + // start shard and perform index check on startup. 
It enforces the shard to fail due to corrupted index files + final IndexMetaData indexMetaData = IndexMetaData.builder(indexShard.indexSettings().getIndexMetaData()) + .settings(Settings.builder() + .put(indexShard.indexSettings.getSettings()) + .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("true", "checksum"))) + .build(); + + IndexShard corruptedShard = newShard(shardRouting, shardPath, indexMetaData, + null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + + final IndexShardRecoveryException indexShardRecoveryException = + expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); + assertThat(indexShardRecoveryException.getMessage(), equalTo("failed recovery")); + + // check that corrupt marker is there + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store has to be marked as corrupted", corruptedMarkerCount.get(), equalTo(1)); + + try { + closeShards(corruptedShard); + } catch (RuntimeException e) { + assertThat(e.getMessage(), equalTo("CheckIndex failed")); + } + } + + public void testShardDoesNotStartIfCorruptedMarkerIsPresent() throws Exception { + final IndexShard indexShard = newStartedShard(true); + + final long numDocs = between(10, 100); + for (long i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Long.toString(i), "{}"); + } + indexShard.flush(new FlushRequest()); + closeShards(indexShard); + + final ShardPath shardPath = indexShard.shardPath(); + + final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), + RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE + ); + final IndexMetaData indexMetaData = indexShard.indexSettings().getIndexMetaData(); + + final Path indexPath = shardPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + + // create corrupted marker + final String corruptionMessage = "fake ioexception"; + try(Store store = createStore(indexShard.indexSettings(), shardPath)) { + store.markStoreCorrupted(new IOException(corruptionMessage)); + } + + // try to start shard on corrupted files + final IndexShard corruptedShard = newShard(shardRouting, shardPath, indexMetaData, + null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + + final IndexShardRecoveryException exception1 = expectThrows(IndexShardRecoveryException.class, + () -> newStartedShard(p -> corruptedShard, true)); + assertThat(exception1.getCause().getMessage(), equalTo(corruptionMessage + " (resource=preexisting_corruption)")); + closeShards(corruptedShard); + + final AtomicInteger corruptedMarkerCount = new AtomicInteger(); + final SimpleFileVisitor corruptedVisitor = new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (Files.isRegularFile(file) && file.getFileName().toString().startsWith(Store.CORRUPTED)) { + corruptedMarkerCount.incrementAndGet(); + } + return FileVisitResult.CONTINUE; + } + }; + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store has to be marked as corrupted", corruptedMarkerCount.get(), equalTo(1)); + + // try to start the shard on corrupted files another time + final IndexShard corruptedShard2 = newShard(shardRouting, shardPath, indexMetaData, + null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + + final IndexShardRecoveryException exception2 = expectThrows(IndexShardRecoveryException.class, + () -> 
newStartedShard(p -> corruptedShard2, true)); + assertThat(exception2.getCause().getMessage(), equalTo(corruptionMessage + " (resource=preexisting_corruption)")); + closeShards(corruptedShard2); + + // check that corrupt marker is there + corruptedMarkerCount.set(0); + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store still has a single corrupt marker", corruptedMarkerCount.get(), equalTo(1)); + } + + private Path corruptIndexFile(ShardPath shardPath) throws IOException { + final Path indexPath = shardPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + final Path[] filesToCorrupt = + Files.walk(indexPath) + .filter(p -> { + final String name = p.getFileName().toString(); + return Files.isRegularFile(p) + && name.startsWith("extra") == false // Skip files added by Lucene's ExtrasFS + && IndexWriter.WRITE_LOCK_NAME.equals(name) == false + && name.startsWith("segments_") == false && name.endsWith(".si") == false; + }) + .toArray(Path[]::new); + CorruptionUtils.corruptFile(random(), filesToCorrupt); + return indexPath; + } + /** * Simulates a scenario that happens when we are async fetching snapshot metadata from GatewayService * and checking index concurrently. This should always be possible without any exception. @@ -2623,7 +2756,7 @@ public void testReadSnapshotAndCheckIndexConcurrently() throws Exception { final IndexMetaData indexMetaData = IndexMetaData.builder(indexShard.indexSettings().getIndexMetaData()) .settings(Settings.builder() .put(indexShard.indexSettings.getSettings()) - .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum", "fix"))) + .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum"))) .build(); final IndexShard newShard = newShard(shardRouting, indexShard.shardPath(), indexMetaData, null, null, indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java index ae2cc84e4870c..36d52d4475b1a 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -106,20 +106,26 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { .isPresent(), is(false)); } - - assertEquals(globalCheckPoint == numDocs - 1 ? 0 : numDocs, resyncTask.getTotalOperations()); if (syncNeeded && globalCheckPoint < numDocs - 1) { - long skippedOps = globalCheckPoint + 1; // everything up to global checkpoint included - assertEquals(skippedOps, resyncTask.getSkippedOperations()); - assertEquals(numDocs - skippedOps, resyncTask.getResyncedOperations()); + if (shard.indexSettings.isSoftDeleteEnabled()) { + assertThat(resyncTask.getSkippedOperations(), equalTo(0)); + assertThat(resyncTask.getResyncedOperations(), equalTo(resyncTask.getTotalOperations())); + assertThat(resyncTask.getTotalOperations(), equalTo(Math.toIntExact(numDocs - 1 - globalCheckPoint))); + } else { + int skippedOps = Math.toIntExact(globalCheckPoint + 1); // everything up to global checkpoint included + assertThat(resyncTask.getSkippedOperations(), equalTo(skippedOps)); + assertThat(resyncTask.getResyncedOperations(), equalTo(numDocs - skippedOps)); + assertThat(resyncTask.getTotalOperations(), equalTo(globalCheckPoint == numDocs - 1 ? 
0 : numDocs)); + } } else { - assertEquals(0, resyncTask.getSkippedOperations()); - assertEquals(0, resyncTask.getResyncedOperations()); + assertThat(resyncTask.getSkippedOperations(), equalTo(0)); + assertThat(resyncTask.getResyncedOperations(), equalTo(0)); + assertThat(resyncTask.getTotalOperations(), equalTo(0)); } - closeShards(shard); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33330") public void testSyncerOnClosingShard() throws Exception { IndexShard shard = newStartedShard(true); AtomicBoolean syncActionCalled = new AtomicBoolean(); diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index e7109979332b4..b93f170174c3c 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -134,7 +134,7 @@ indexSettings, null, store, newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilari (e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED, () -> primaryTerm, EngineTestCase.tombstoneDocSupplier()); engine = new InternalEngine(config); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); listeners.setCurrentRefreshLocationSupplier(engine::getTranslogLastWriteLocation); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java index 95772910747c4..04d15d39b58e9 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -77,48 +77,4 @@ public void testGetForUpdate() throws IOException { closeShards(primary); } - - public void testGetForUpdateWithParentField() throws IOException { - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put("index.version.created", Version.V_5_6_0) // for parent field mapper - .build(); - IndexMetaData metaData = IndexMetaData.builder("test") - .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") - .settings(settings) - .primaryTerm(0, 1).build(); - IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); - recoverShardFromStore(primary); - Engine.IndexResult test = indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); - assertTrue(primary.getEngine().refreshNeeded()); - GetResult testGet = primary.getService().getForUpdate("test", "0", test.getVersion(), VersionType.INTERNAL); - assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); - assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); - try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { - assertEquals(searcher.reader().maxDoc(), 1); // we refreshed - } - - Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null); - assertTrue(primary.getEngine().refreshNeeded()); - GetResult testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); - assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); - 
assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); - try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { - assertEquals(searcher.reader().maxDoc(), 1); // we read from the translog - } - primary.getEngine().refresh("test"); - try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { - assertEquals(searcher.reader().maxDoc(), 2); - } - - // now again from the reader - test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null); - assertTrue(primary.getEngine().refreshNeeded()); - testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); - assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); - assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); - - closeShards(primary); - } } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java index 9ae502fecb580..c8d4dbd43df2f 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java @@ -171,7 +171,7 @@ private Tuple, TranslogWriter> createReadersAndWriter(final } writer = TranslogWriter.create(new ShardId("index", "uuid", 0), translogUUID, gen, tempDir.resolve(Translog.getFilename(gen)), FileChannel::open, TranslogConfig.DEFAULT_BUFFER_SIZE, 1L, 1L, () -> 1L, - () -> 1L, randomNonNegativeLong()); + () -> 1L, randomNonNegativeLong(), new TragicExceptionHolder()); writer = Mockito.spy(writer); Mockito.doReturn(now - (numberOfReaders - gen + 1) * 1000).when(writer).getLastModifiedTime(); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 1c27a59e0ecbe..a0e0c481e5f86 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -33,6 +33,7 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.Assertions; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; @@ -108,6 +109,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongSupplier; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.LongStream; @@ -358,7 +360,8 @@ public void testSimpleOperations() throws IOException { } markCurrentGenAsCommitted(translog); - try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(firstId + 1)) { + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), firstId + 1), randomNonNegativeLong())) { assertThat(snapshot, SnapshotMatchers.size(0)); assertThat(snapshot.totalOperations(), equalTo(0)); } @@ -643,6 +646,82 @@ public void testSnapshotOnClosedTranslog() throws IOException { } } + public void testSnapshotFromMinGen() throws Exception { + Map> operationsByGen = new HashMap<>(); + try 
(Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), 1), randomNonNegativeLong())) { + assertThat(snapshot, SnapshotMatchers.size(0)); + } + int iters = between(1, 10); + for (int i = 0; i < iters; i++) { + long currentGeneration = translog.currentFileGeneration(); + operationsByGen.putIfAbsent(currentGeneration, new ArrayList<>()); + int numOps = between(0, 20); + for (int op = 0; op < numOps; op++) { + long seqNo = randomLongBetween(0, 1000); + addToTranslogAndList(translog, operationsByGen.get(currentGeneration), new Translog.Index("test", + Long.toString(seqNo), seqNo, primaryTerm.get(), new byte[]{1})); + } + long minGen = randomLongBetween(translog.getMinFileGeneration(), translog.currentFileGeneration()); + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), minGen), Long.MAX_VALUE)) { + List expectedOps = operationsByGen.entrySet().stream() + .filter(e -> e.getKey() >= minGen) + .flatMap(e -> e.getValue().stream()) + .collect(Collectors.toList()); + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedOps)); + } + long upToSeqNo = randomLongBetween(0, 2000); + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), minGen), upToSeqNo)) { + List expectedOps = operationsByGen.entrySet().stream() + .filter(e -> e.getKey() >= minGen) + .flatMap(e -> e.getValue().stream().filter(op -> op.seqNo() <= upToSeqNo)) + .collect(Collectors.toList()); + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedOps)); + } + translog.rollGeneration(); + } + } + + public void testSeqNoFilterSnapshot() throws Exception { + final int generations = between(2, 20); + for (int gen = 0; gen < generations; gen++) { + List batch = LongStream.rangeClosed(0, between(0, 100)).boxed().collect(Collectors.toList()); + Randomness.shuffle(batch); + for (long seqNo : batch) { + Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1}); + translog.add(op); + } + translog.rollGeneration(); + } + List operations = new ArrayList<>(); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + Translog.Operation op; + while ((op = snapshot.next()) != null) { + operations.add(op); + } + } + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + Translog.Snapshot filter = new Translog.SeqNoFilterSnapshot(snapshot, between(200, 300), between(300, 400)); // out range + assertThat(filter, SnapshotMatchers.size(0)); + assertThat(filter.totalOperations(), equalTo(snapshot.totalOperations())); + assertThat(filter.overriddenOperations(), equalTo(snapshot.overriddenOperations())); + assertThat(filter.skippedOperations(), equalTo(snapshot.totalOperations())); + } + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + int fromSeqNo = between(-2, 500); + int toSeqNo = between(fromSeqNo, 500); + List selectedOps = operations.stream() + .filter(op -> fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo).collect(Collectors.toList()); + Translog.Snapshot filter = new Translog.SeqNoFilterSnapshot(snapshot, fromSeqNo, toSeqNo); + assertThat(filter, SnapshotMatchers.containsOperationsInAnyOrder(selectedOps)); + assertThat(filter.totalOperations(), equalTo(snapshot.totalOperations())); + assertThat(filter.overriddenOperations(), equalTo(snapshot.overriddenOperations())); + assertThat(filter.skippedOperations(), 
equalTo(snapshot.skippedOperations() + operations.size() - selectedOps.size())); + } + } + public void assertFileIsPresent(Translog translog, long id) { if (Files.exists(translog.location().resolve(Translog.getFilename(id)))) { return; @@ -1302,7 +1381,7 @@ public void testBasicRecovery() throws IOException { translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); - try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) { + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration, Long.MAX_VALUE)) { for (int i = minUncommittedOp; i < translogOperations; i++) { assertEquals("expected operation" + i + " to be in the previous translog but wasn't", translog.currentFileGeneration() - 1, locations.get(i).generation); @@ -1655,7 +1734,7 @@ public void testRandomExceptionsOnTrimOperations( ) throws Exception { } assertThat(expectedException, is(not(nullValue()))); - + assertThat(failableTLog.getTragicException(), equalTo(expectedException)); assertThat(fileChannels, is(not(empty()))); assertThat("all file channels have to be closed", fileChannels.stream().filter(f -> f.isOpen()).findFirst().isPresent(), is(false)); @@ -1733,7 +1812,7 @@ public void testOpenForeignTranslog() throws IOException { } this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); - try (Translog.Snapshot snapshot = this.translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) { + try (Translog.Snapshot snapshot = this.translog.newSnapshotFromGen(translogGeneration, Long.MAX_VALUE)) { for (int i = firstUncommitted; i < translogOperations; i++) { Translog.Operation next = snapshot.next(); assertNotNull("" + i, next); @@ -2505,11 +2584,13 @@ public void testWithRandomException() throws IOException { syncedDocs.addAll(unsynced); unsynced.clear(); } catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) { - // fair enough + assertEquals(failableTLog.getTragicException(), ex); } catch (IOException ex) { assertEquals(ex.getMessage(), "__FAKE__ no space left on device"); + assertEquals(failableTLog.getTragicException(), ex); } catch (RuntimeException ex) { assertEquals(ex.getMessage(), "simulated"); + assertEquals(failableTLog.getTragicException(), ex); } finally { Checkpoint checkpoint = Translog.readCheckpoint(config.getTranslogPath()); if (checkpoint.numOps == unsynced.size() + syncedDocs.size()) { @@ -2553,7 +2634,8 @@ public void testWithRandomException() throws IOException { generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); } try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); - Translog.Snapshot snapshot = translog.newSnapshotFromGen(minGenForRecovery)) { + Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(generationUUID, minGenForRecovery), Long.MAX_VALUE)) { assertEquals(syncedDocs.size(), snapshot.totalOperations()); for (int i = 0; i < syncedDocs.size(); i++) { Translog.Operation next = snapshot.next(); @@ -2931,6 +3013,47 @@ public void testCloseSnapshotTwice() throws 
Exception { } } + // close method should never be called directly from Translog (the only exception is closeOnTragicEvent) + public void testTranslogCloseInvariant() throws IOException { + assumeTrue("test only works with assertions enabled", Assertions.ENABLED); + class MisbehavingTranslog extends Translog { + MisbehavingTranslog(TranslogConfig config, String translogUUID, TranslogDeletionPolicy deletionPolicy, LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) throws IOException { + super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier); + } + + void callCloseDirectly() throws IOException { + close(); + } + + void callCloseUsingIOUtilsWithExceptionHandling() { + IOUtils.closeWhileHandlingException(this); + } + + void callCloseUsingIOUtils() throws IOException { + IOUtils.close(this); + } + + void callCloseOnTragicEvent() { + Exception e = new Exception("test tragic exception"); + tragedy.setTragicException(e); + closeOnTragicEvent(e); + } + } + + + globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + Path path = createTempDir(); + final TranslogConfig translogConfig = getTranslogConfig(path); + final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); + final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); + MisbehavingTranslog misbehavingTranslog = new MisbehavingTranslog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get(), primaryTerm::get); + + expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseDirectly()); + expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseUsingIOUtils()); + expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseUsingIOUtilsWithExceptionHandling()); + misbehavingTranslog.callCloseOnTragicEvent(); + } + static class SortedSnapshot implements Translog.Snapshot { private final Translog.Snapshot snapshot; private List operations = null; diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index 47f30e10ef912..485fd92099630 100644 --- a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -136,7 +136,7 @@ public void testAnalyzerAliasNotAllowedPost5x() throws IOException { .put("index.analysis.analyzer.foobar.type", "standard") .put("index.analysis.analyzer.foobar.alias","foobaz") // analyzer aliases were removed in v5.0.0 alpha6 - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_beta1, null)) + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, null)) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); AnalysisRegistry registry = getNewRegistry(settings); @@ -149,7 +149,7 @@ public void testVersionedAnalyzers() throws Exception { Settings settings2 = Settings.builder() .loadFromStream(yaml, getClass().getResourceAsStream(yaml), false) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0) .build(); AnalysisRegistry newRegistry = getNewRegistry(settings2); IndexAnalyzers 
indexAnalyzers = getIndexAnalyzers(newRegistry, settings2); @@ -162,9 +162,9 @@ public void testVersionedAnalyzers() throws Exception { // analysis service has the expected version assertThat(indexAnalyzers.get("standard").analyzer(), is(instanceOf(StandardAnalyzer.class))); - assertEquals(Version.V_5_0_0.luceneVersion, + assertEquals(Version.V_6_0_0.luceneVersion, indexAnalyzers.get("standard").analyzer().getVersion()); - assertEquals(Version.V_5_0_0.luceneVersion, + assertEquals(Version.V_6_0_0.luceneVersion, indexAnalyzers.get("stop").analyzer().getVersion()); assertThat(indexAnalyzers.get("custom7").analyzer(), is(instanceOf(StandardAnalyzer.class))); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 580696264bdd4..c68e4870aaeb0 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -273,7 +273,7 @@ public IndexSettings getIndexSettings() { } @Override - public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { + public boolean updateMapping(final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData) throws IOException { failRandomly(); return false; } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 51698d443925b..b6f5a7b645169 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -92,7 +92,7 @@ public void testGetStartingSeqNo() throws Exception { replica.close("test", false); final List commits = DirectoryReader.listCommits(replica.store().directory()); IndexWriterConfig iwc = new IndexWriterConfig(null) - .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setCommitOnClose(false) .setMergePolicy(NoMergePolicy.INSTANCE) .setOpenMode(IndexWriterConfig.OpenMode.APPEND); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index fa19702d39b5f..45535e19672c7 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -44,7 +44,6 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.translog.SnapshotMatchers; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.HashMap; import java.util.List; @@ -65,17 +64,16 @@ public void testTranslogHistoryTransferred() throws Exception { int docs = shards.indexDocs(10); getTranslog(shards.getPrimary()).rollGeneration(); shards.flush(); - if (randomBoolean()) { - docs += shards.indexDocs(10); - } + int moreDocs = shards.indexDocs(randomInt(10)); shards.addReplica(); shards.startAll(); final IndexShard replica = shards.getReplicas().get(0); - assertThat(getTranslog(replica).totalOperations(), equalTo(docs)); + boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); + 
assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? moreDocs : docs + moreDocs)); + shards.assertAllEqual(docs + moreDocs); } } - @TestLogging("_root:TRACE") public void testRetentionPolicyChangeDuringRecovery() throws Exception { try (ReplicationGroup shards = createGroup(0)) { shards.startPrimary(); @@ -109,7 +107,7 @@ public void testRetentionPolicyChangeDuringRecovery() throws Exception { } } - public void testRecoveryWithOutOfOrderDelete() throws Exception { + public void testRecoveryWithOutOfOrderDeleteWithTranslog() throws Exception { /* * The flow of this test: * - delete #1 @@ -119,12 +117,9 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception { * - flush (commit point has max_seqno 3, and local checkpoint 1 -> points at gen 2, previous commit point is maintained) * - index #2 * - index #5 - * - If flush and the translog/lucene retention disabled, delete #1 will be removed while index #0 is still retained and replayed. + * - If flush and the translog retention disabled, delete #1 will be removed while index #0 is still retained and replayed. */ - Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10) - // If soft-deletes is enabled, delete#1 will be reclaimed because its segment (segment_1) is fully deleted - // index#0 will be retained if merge is disabled; otherwise it will be reclaimed because gcp=3 and retained_ops=0 - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build(); + Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build(); try (ReplicationGroup shards = createGroup(1, settings)) { shards.startAll(); // create out of order delete and index op on replica @@ -133,7 +128,7 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception { // delete #1 orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id"); - orgReplica.flush(new FlushRequest().force(true)); // isolate delete#1 in its own translog generation and lucene segment + getTranslog(orgReplica).rollGeneration(); // isolate the delete in it's own generation // index #0 orgReplica.applyIndexOperationOnReplica(0, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, SourceToParse.source(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON)); @@ -153,17 +148,16 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception { final int translogOps; if (randomBoolean()) { if (randomBoolean()) { - logger.info("--> flushing shard (translog/soft-deletes will be trimmed)"); + logger.info("--> flushing shard (translog will be trimmed)"); IndexMetaData.Builder builder = IndexMetaData.builder(orgReplica.indexSettings().getIndexMetaData()); builder.settings(Settings.builder().put(orgReplica.indexSettings().getSettings()) .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1") - .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1") - .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0)); + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1")); orgReplica.indexSettings().updateIndexMetaData(builder.build()); orgReplica.onSettingsChanged(); translogOps = 5; // 4 ops + seqno gaps (delete #1 is removed but index #0 will be replayed). 
} else { - logger.info("--> flushing shard (translog/soft-deletes will be retained)"); + logger.info("--> flushing shard (translog will be retained)"); translogOps = 6; // 5 ops + seqno gaps } flushShard(orgReplica); @@ -182,6 +176,62 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception { } } + public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10) + // If soft-deletes is enabled, delete#1 will be reclaimed because its segment (segment_1) is fully deleted + // index#0 will be retained if merge is disabled; otherwise it will be reclaimed because gcp=3 and retained_ops=0 + .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build(); + try (ReplicationGroup shards = createGroup(1, settings)) { + shards.startAll(); + // create out of order delete and index op on replica + final IndexShard orgReplica = shards.getReplicas().get(0); + final String indexName = orgReplica.shardId().getIndexName(); + + // delete #1 + orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id"); + orgReplica.flush(new FlushRequest().force(true)); // isolate delete#1 in its own translog generation and lucene segment + // index #0 + orgReplica.applyIndexOperationOnReplica(0, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, + SourceToParse.source(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON)); + // index #3 + orgReplica.applyIndexOperationOnReplica(3, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, + SourceToParse.source(indexName, "type", "id-3", new BytesArray("{}"), XContentType.JSON)); + // Flushing a new commit with local checkpoint=1 allows to delete the translog gen #1. + orgReplica.flush(new FlushRequest().force(true).waitIfOngoing(true)); + // index #2 + orgReplica.applyIndexOperationOnReplica(2, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, + SourceToParse.source(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON)); + orgReplica.updateGlobalCheckpointOnReplica(3L, "test"); + // index #5 -> force NoOp #4. + orgReplica.applyIndexOperationOnReplica(5, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, + SourceToParse.source(indexName, "type", "id-5", new BytesArray("{}"), XContentType.JSON)); + + if (randomBoolean()) { + if (randomBoolean()) { + logger.info("--> flushing shard (translog/soft-deletes will be trimmed)"); + IndexMetaData.Builder builder = IndexMetaData.builder(orgReplica.indexSettings().getIndexMetaData()); + builder.settings(Settings.builder().put(orgReplica.indexSettings().getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0)); + orgReplica.indexSettings().updateIndexMetaData(builder.build()); + orgReplica.onSettingsChanged(); + } + flushShard(orgReplica); + } + + final IndexShard orgPrimary = shards.getPrimary(); + shards.promoteReplicaToPrimary(orgReplica).get(); // wait for primary/replica sync to make sure seq# gap is closed. 
+ + IndexShard newReplica = shards.addReplicaWithExistingPath(orgPrimary.shardPath(), orgPrimary.routingEntry().currentNodeId()); + shards.recoverReplica(newReplica); + shards.assertAllEqual(3); + try (Translog.Snapshot snapshot = newReplica.getHistoryOperations("test", 0)) { + assertThat(snapshot, SnapshotMatchers.size(6)); + } + } + } + public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception { try (ReplicationGroup shards = createGroup(1)) { shards.startAll(); @@ -230,7 +280,8 @@ public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception { shards.recoverReplica(newReplica); // file based recovery should be made assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); - assertThat(getTranslog(newReplica).totalOperations(), equalTo(numDocs)); + boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); + assertThat(getTranslog(newReplica).totalOperations(), equalTo(softDeletesEnabled ? nonFlushedDocs : numDocs)); // history uuid was restored assertThat(newReplica.getHistoryUUID(), equalTo(historyUUID)); @@ -334,7 +385,8 @@ public void testShouldFlushAfterPeerRecovery() throws Exception { shards.recoverReplica(replica); // Make sure the flushing will eventually be completed (eg. `shouldPeriodicallyFlush` is false) assertBusy(() -> assertThat(getEngine(replica).shouldPeriodicallyFlush(), equalTo(false))); - assertThat(getTranslog(replica).totalOperations(), equalTo(numDocs)); + boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); + assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? 0 : numDocs)); shards.assertAllEqual(numDocs); } } diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 8edf0f45cfbbf..ce162b9600cf4 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -729,7 +729,7 @@ public void testMultiIndex() throws Exception { public void testFieldDataFieldsParam() throws Exception { assertAcked(client().admin().indices().prepareCreate("test1") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)) + .setSettings(Settings.builder().put("index.version.created", Version.V_6_0_0.id)) .addMapping("_doc", "bar", "type=text,fielddata=true", "baz", "type=text,fielddata=true").get()); diff --git a/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java new file mode 100644 index 0000000000000..2cb13af7a2808 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java @@ -0,0 +1,141 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.core.Is.is; + +public class ConditionalProcessorTests extends ESTestCase { + + public void testChecksCondition() throws Exception { + String conditionalField = "field1"; + String scriptName = "conditionalScript"; + String trueValue = "truthy"; + ScriptService scriptService = new ScriptService(Settings.builder().build(), + Collections.singletonMap( + Script.DEFAULT_SCRIPT_LANG, + new MockScriptEngine( + Script.DEFAULT_SCRIPT_LANG, + Collections.singletonMap( + scriptName, ctx -> trueValue.equals(ctx.get(conditionalField)) + ) + ) + ), + new HashMap<>(ScriptModule.CORE_CONTEXTS) + ); + Map document = new HashMap<>(); + ConditionalProcessor processor = new ConditionalProcessor( + randomAlphaOfLength(10), + new Script( + ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, + scriptName, Collections.emptyMap()), scriptService, + new Processor() { + @Override + public void execute(final IngestDocument ingestDocument) throws Exception { + ingestDocument.setFieldValue("foo", "bar"); + } + + @Override + public String getType() { + return null; + } + + @Override + public String getTag() { + return null; + } + }); + + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + ingestDocument.setFieldValue(conditionalField, trueValue); + processor.execute(ingestDocument); + assertThat(ingestDocument.getSourceAndMetadata().get(conditionalField), is(trueValue)); + assertThat(ingestDocument.getSourceAndMetadata().get("foo"), is("bar")); + + String falseValue = "falsy"; + ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + ingestDocument.setFieldValue(conditionalField, falseValue); + processor.execute(ingestDocument); + assertThat(ingestDocument.getSourceAndMetadata().get(conditionalField), is(falseValue)); + assertThat(ingestDocument.getSourceAndMetadata(), not(hasKey("foo"))); + } + + @SuppressWarnings("unchecked") + public void testActsOnImmutableData() throws Exception { + assertMutatingCtxThrows(ctx -> ctx.remove("foo")); + assertMutatingCtxThrows(ctx -> ctx.put("foo", "bar")); + assertMutatingCtxThrows(ctx -> ((List)ctx.get("listField")).add("bar")); + assertMutatingCtxThrows(ctx -> ((List)ctx.get("listField")).remove("bar")); + } + + private static void assertMutatingCtxThrows(Consumer> mutation) throws Exception { + String scriptName = "conditionalScript"; + CompletableFuture expectedException = new CompletableFuture<>(); + ScriptService scriptService = new ScriptService(Settings.builder().build(), + Collections.singletonMap( + Script.DEFAULT_SCRIPT_LANG, + new MockScriptEngine( + Script.DEFAULT_SCRIPT_LANG, + Collections.singletonMap( + scriptName, ctx -> { + try { + mutation.accept(ctx); + } 
catch (Exception e) { + expectedException.complete(e); + } + return false; + } + ) + ) + ), + new HashMap<>(ScriptModule.CORE_CONTEXTS) + ); + Map document = new HashMap<>(); + ConditionalProcessor processor = new ConditionalProcessor( + randomAlphaOfLength(10), + new Script( + ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, + scriptName, Collections.emptyMap()), scriptService, null + ); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + ingestDocument.setFieldValue("listField", new ArrayList<>()); + processor.execute(ingestDocument); + Exception e = expectedException.get(); + assertThat(e, instanceOf(UnsupportedOperationException.class)); + assertEquals("Mutating ingest documents in conditionals is not supported", e.getMessage()); + } +} diff --git a/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java b/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java index 61afd9ce2a473..f3a11a86e54e5 100644 --- a/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -38,6 +39,9 @@ import static org.mockito.Mockito.mock; public class ConfigurationUtilsTests extends ESTestCase { + + private final ScriptService scriptService = mock(ScriptService.class); + private Map config; @Before @@ -120,7 +124,7 @@ public void testReadProcessors() throws Exception { config.add(Collections.singletonMap("test_processor", emptyConfig)); config.add(Collections.singletonMap("test_processor", emptyConfig)); - List result = ConfigurationUtils.readProcessorConfigs(config, registry); + List result = ConfigurationUtils.readProcessorConfigs(config, scriptService, registry); assertThat(result.size(), equalTo(2)); assertThat(result.get(0), sameInstance(processor)); assertThat(result.get(1), sameInstance(processor)); @@ -129,7 +133,7 @@ public void testReadProcessors() throws Exception { unknownTaggedConfig.put("tag", "my_unknown"); config.add(Collections.singletonMap("unknown_processor", unknownTaggedConfig)); ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, - () -> ConfigurationUtils.readProcessorConfigs(config, registry)); + () -> ConfigurationUtils.readProcessorConfigs(config, scriptService, registry)); assertThat(e.getMessage(), equalTo("No processor type exists with name [unknown_processor]")); assertThat(e.getMetadata("es.processor_tag"), equalTo(Collections.singletonList("my_unknown"))); assertThat(e.getMetadata("es.processor_type"), equalTo(Collections.singletonList("unknown_processor"))); @@ -142,7 +146,10 @@ public void testReadProcessors() throws Exception { Map secondUnknonwTaggedConfig = new HashMap<>(); secondUnknonwTaggedConfig.put("tag", "my_second_unknown"); config2.add(Collections.singletonMap("second_unknown_processor", secondUnknonwTaggedConfig)); - e = expectThrows(ElasticsearchParseException.class, () -> ConfigurationUtils.readProcessorConfigs(config2, registry)); + e = expectThrows( + ElasticsearchParseException.class, + () -> ConfigurationUtils.readProcessorConfigs(config2, scriptService, registry) + ); assertThat(e.getMessage(), equalTo("No processor type exists with name [unknown_processor]")); assertThat(e.getMetadata("es.processor_tag"), 
equalTo(Collections.singletonList("my_unknown"))); assertThat(e.getMetadata("es.processor_type"), equalTo(Collections.singletonList("unknown_processor"))); @@ -166,17 +173,17 @@ public void testReadProcessorFromObjectOrMap() throws Exception { }); Object emptyConfig = Collections.emptyMap(); - Processor processor1 = ConfigurationUtils.readProcessor(registry, "script", emptyConfig); + Processor processor1 = ConfigurationUtils.readProcessor(registry, scriptService, "script", emptyConfig); assertThat(processor1, sameInstance(processor)); Object inlineScript = "test_script"; - Processor processor2 = ConfigurationUtils.readProcessor(registry, "script", inlineScript); + Processor processor2 = ConfigurationUtils.readProcessor(registry, scriptService, "script", inlineScript); assertThat(processor2, sameInstance(processor)); Object invalidConfig = 12L; ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class, - () -> ConfigurationUtils.readProcessor(registry, "unknown_processor", invalidConfig)); + () -> ConfigurationUtils.readProcessor(registry, scriptService, "unknown_processor", invalidConfig)); assertThat(ex.getMessage(), equalTo("property isn't a map, but of type [" + invalidConfig.getClass().getName() + "]")); } diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index 338e5b662c5da..4c2352bfebe7d 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -97,12 +97,12 @@ public void testFailStartNode() throws Exception { AcknowledgedResponse response = client().admin().cluster().preparePutPipeline("_id", pipelineSource, XContentType.JSON).get(); assertThat(response.isAcknowledged(), is(true)); - Pipeline pipeline = internalCluster().getInstance(NodeService.class, node1).getIngestService().getPipelineStore().get("_id"); + Pipeline pipeline = internalCluster().getInstance(NodeService.class, node1).getIngestService().getPipeline("_id"); assertThat(pipeline, notNullValue()); installPlugin = false; String node2 = internalCluster().startNode(); - pipeline = internalCluster().getInstance(NodeService.class, node2).getIngestService().getPipelineStore().get("_id"); + pipeline = internalCluster().getInstance(NodeService.class, node2).getIngestService().getPipeline("_id"); assertNotNull(pipeline); assertThat(pipeline.getId(), equalTo("_id")); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index c0353acb7f9d0..83a5bef4de279 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -21,16 +21,69 @@ import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; import java.util.Map; - -import org.elasticsearch.common.settings.Settings; +import java.util.Objects; +import java.util.concurrent.ExecutorService; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.Version; +import 
org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.DeletePipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.mockito.Mockito; +import org.hamcrest.CustomTypeSafeMatcher; +import org.mockito.ArgumentMatcher; +import org.mockito.invocation.InvocationOnMock; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.argThat; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class IngestServiceTests extends ESTestCase { - private final IngestPlugin DUMMY_PLUGIN = new IngestPlugin() { + + private static final IngestPlugin DUMMY_PLUGIN = new IngestPlugin() { @Override public Map getProcessors(Processor.Parameters parameters) { return Collections.singletonMap("foo", (factories, tag, config) -> null); @@ -38,19 +91,812 @@ public Map getProcessors(Processor.Parameters paramet }; public void testIngestPlugin() { - ThreadPool tp = Mockito.mock(ThreadPool.class); - IngestService ingestService = new IngestService(Settings.EMPTY, tp, null, null, + ThreadPool tp = mock(ThreadPool.class); + IngestService ingestService = new IngestService(mock(ClusterService.class), tp, null, null, null, Collections.singletonList(DUMMY_PLUGIN)); - Map factories = ingestService.getPipelineStore().getProcessorFactories(); + Map factories = ingestService.getProcessorFactories(); assertTrue(factories.containsKey("foo")); assertEquals(1, factories.size()); } public void testIngestPluginDuplicate() { - ThreadPool tp = Mockito.mock(ThreadPool.class); + ThreadPool tp = mock(ThreadPool.class); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - new IngestService(Settings.EMPTY, tp, null, null, + new IngestService(mock(ClusterService.class), tp, null, null, null, Arrays.asList(DUMMY_PLUGIN, DUMMY_PLUGIN))); 
assertTrue(e.getMessage(), e.getMessage().contains("already registered")); } + + public void testExecuteIndexPipelineDoesNotExist() { + ThreadPool threadPool = mock(ThreadPool.class); + final ExecutorService executorService = EsExecutors.newDirectExecutorService(); + when(threadPool.executor(anyString())).thenReturn(executorService); + IngestService ingestService = new IngestService(mock(ClusterService.class), threadPool, null, null, + null, Collections.singletonList(DUMMY_PLUGIN)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + + final SetOnce failure = new SetOnce<>(); + final BiConsumer failureHandler = (request, e) -> { + failure.set(true); + assertThat(request, sameInstance(indexRequest)); + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), equalTo("pipeline with id [_id] does not exist")); + }; + + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + + assertTrue(failure.get()); + verify(completionHandler, times(1)).accept(null); + } + + public void testUpdatePipelines() { + IngestService ingestService = createWithProcessors(); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.pipelines().size(), is(0)); + + PipelineConfiguration pipeline = new PipelineConfiguration( + "_id",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON + ); + IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", pipeline)); + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) + .build(); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.pipelines().size(), is(1)); + assertThat(ingestService.pipelines().get("_id").getId(), equalTo("_id")); + assertThat(ingestService.pipelines().get("_id").getDescription(), nullValue()); + assertThat(ingestService.pipelines().get("_id").getProcessors().size(), equalTo(1)); + assertThat(ingestService.pipelines().get("_id").getProcessors().get(0).getType(), equalTo("set")); + } + + public void testDelete() { + IngestService ingestService = createWithProcessors(); + PipelineConfiguration config = new PipelineConfiguration( + "_id",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON + ); + IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", config)); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() + .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("_id"), notNullValue()); + + // Delete pipeline: + DeletePipelineRequest deleteRequest = new DeletePipelineRequest("_id"); + previousClusterState = clusterState; + clusterState = IngestService.innerDelete(deleteRequest, 
clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("_id"), nullValue()); + + // Delete existing pipeline: + try { + IngestService.innerDelete(deleteRequest, clusterState); + fail("exception expected"); + } catch (ResourceNotFoundException e) { + assertThat(e.getMessage(), equalTo("pipeline [_id] is missing")); + } + } + + public void testValidateNoIngestInfo() throws Exception { + IngestService ingestService = createWithProcessors(); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray( + "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON); + Exception e = expectThrows(IllegalStateException.class, () -> ingestService.validatePipeline(emptyMap(), putRequest)); + assertEquals("Ingest info is empty", e.getMessage()); + + DiscoveryNode discoveryNode = new DiscoveryNode("_node_id", buildNewFakeTransportAddress(), + emptyMap(), emptySet(), Version.CURRENT); + IngestInfo ingestInfo = new IngestInfo(Collections.singletonList(new ProcessorInfo("set"))); + ingestService.validatePipeline(Collections.singletonMap(discoveryNode, ingestInfo), putRequest); + } + + public void testCrud() throws Exception { + IngestService ingestService = createWithProcessors(); + String id = "_id"; + Pipeline pipeline = ingestService.getPipeline(id); + assertThat(pipeline, nullValue()); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + + PutPipelineRequest putRequest = new PutPipelineRequest(id, + new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + pipeline = ingestService.getPipeline(id); + assertThat(pipeline, notNullValue()); + assertThat(pipeline.getId(), equalTo(id)); + assertThat(pipeline.getDescription(), nullValue()); + assertThat(pipeline.getProcessors().size(), equalTo(1)); + assertThat(pipeline.getProcessors().get(0).getType(), equalTo("set")); + + DeletePipelineRequest deleteRequest = new DeletePipelineRequest(id); + previousClusterState = clusterState; + clusterState = IngestService.innerDelete(deleteRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + pipeline = ingestService.getPipeline(id); + assertThat(pipeline, nullValue()); + } + + public void testPut() { + IngestService ingestService = createWithProcessors(); + String id = "_id"; + Pipeline pipeline = ingestService.getPipeline(id); + assertThat(pipeline, nullValue()); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + + // add a new pipeline: + PutPipelineRequest putRequest = new PutPipelineRequest(id, new BytesArray("{\"processors\": []}"), XContentType.JSON); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + pipeline = ingestService.getPipeline(id); + assertThat(pipeline, notNullValue()); + assertThat(pipeline.getId(), equalTo(id)); + assertThat(pipeline.getDescription(), nullValue()); + assertThat(pipeline.getProcessors().size(), equalTo(0)); + + // overwrite 
existing pipeline: + putRequest = + new PutPipelineRequest(id, new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), XContentType.JSON); + previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + pipeline = ingestService.getPipeline(id); + assertThat(pipeline, notNullValue()); + assertThat(pipeline.getId(), equalTo(id)); + assertThat(pipeline.getDescription(), equalTo("_description")); + assertThat(pipeline.getProcessors().size(), equalTo(0)); + } + + public void testPutWithErrorResponse() { + IngestService ingestService = createWithProcessors(); + String id = "_id"; + Pipeline pipeline = ingestService.getPipeline(id); + assertThat(pipeline, nullValue()); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + + PutPipelineRequest putRequest = + new PutPipelineRequest(id, new BytesArray("{\"description\": \"empty processors\"}"), XContentType.JSON); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + try { + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + fail("should fail"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), equalTo("[processors] required property is missing")); + } + pipeline = ingestService.getPipeline(id); + assertNotNull(pipeline); + assertThat(pipeline.getId(), equalTo("_id")); + assertThat(pipeline.getDescription(), equalTo("this is a place holder pipeline, because pipeline with" + + " id [_id] could not be loaded")); + assertThat(pipeline.getProcessors().size(), equalTo(1)); + assertNull(pipeline.getProcessors().get(0).getTag()); + assertThat(pipeline.getProcessors().get(0).getType(), equalTo("unknown")); + } + + public void testDeleteUsingWildcard() { + IngestService ingestService = createWithProcessors(); + HashMap pipelines = new HashMap<>(); + BytesArray definition = new BytesArray( + "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}" + ); + pipelines.put("p1", new PipelineConfiguration("p1", definition, XContentType.JSON)); + pipelines.put("p2", new PipelineConfiguration("p2", definition, XContentType.JSON)); + pipelines.put("q1", new PipelineConfiguration("q1", definition, XContentType.JSON)); + IngestMetadata ingestMetadata = new IngestMetadata(pipelines); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() + .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("p1"), notNullValue()); + assertThat(ingestService.getPipeline("p2"), notNullValue()); + assertThat(ingestService.getPipeline("q1"), notNullValue()); + + // Delete pipeline matching wildcard + DeletePipelineRequest deleteRequest = new DeletePipelineRequest("p*"); + previousClusterState = clusterState; + clusterState = IngestService.innerDelete(deleteRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("p1"), nullValue()); + assertThat(ingestService.getPipeline("p2"), nullValue()); + 
assertThat(ingestService.getPipeline("q1"), notNullValue()); + + // Exception if we used name which does not exist + try { + IngestService.innerDelete(new DeletePipelineRequest("unknown"), clusterState); + fail("exception expected"); + } catch (ResourceNotFoundException e) { + assertThat(e.getMessage(), equalTo("pipeline [unknown] is missing")); + } + + // match all wildcard works on last remaining pipeline + DeletePipelineRequest matchAllDeleteRequest = new DeletePipelineRequest("*"); + previousClusterState = clusterState; + clusterState = IngestService.innerDelete(matchAllDeleteRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("p1"), nullValue()); + assertThat(ingestService.getPipeline("p2"), nullValue()); + assertThat(ingestService.getPipeline("q1"), nullValue()); + + // match all wildcard does not throw exception if none match + IngestService.innerDelete(matchAllDeleteRequest, clusterState); + } + + public void testDeleteWithExistingUnmatchedPipelines() { + IngestService ingestService = createWithProcessors(); + HashMap pipelines = new HashMap<>(); + BytesArray definition = new BytesArray( + "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}" + ); + pipelines.put("p1", new PipelineConfiguration("p1", definition, XContentType.JSON)); + IngestMetadata ingestMetadata = new IngestMetadata(pipelines); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() + .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("p1"), notNullValue()); + + DeletePipelineRequest deleteRequest = new DeletePipelineRequest("z*"); + try { + IngestService.innerDelete(deleteRequest, clusterState); + fail("exception expected"); + } catch (ResourceNotFoundException e) { + assertThat(e.getMessage(), equalTo("pipeline [z*] is missing")); + } + } + + public void testGetPipelines() { + Map configs = new HashMap<>(); + configs.put("_id1", new PipelineConfiguration( + "_id1", new BytesArray("{\"processors\": []}"), XContentType.JSON + )); + configs.put("_id2", new PipelineConfiguration( + "_id2", new BytesArray("{\"processors\": []}"), XContentType.JSON + )); + + assertThat(IngestService.innerGetPipelines(null, "_id1").isEmpty(), is(true)); + + IngestMetadata ingestMetadata = new IngestMetadata(configs); + List pipelines = IngestService.innerGetPipelines(ingestMetadata, "_id1"); + assertThat(pipelines.size(), equalTo(1)); + assertThat(pipelines.get(0).getId(), equalTo("_id1")); + + pipelines = IngestService.innerGetPipelines(ingestMetadata, "_id1", "_id2"); + assertThat(pipelines.size(), equalTo(2)); + assertThat(pipelines.get(0).getId(), equalTo("_id1")); + assertThat(pipelines.get(1).getId(), equalTo("_id2")); + + pipelines = IngestService.innerGetPipelines(ingestMetadata, "_id*"); + pipelines.sort(Comparator.comparing(PipelineConfiguration::getId)); + assertThat(pipelines.size(), equalTo(2)); + assertThat(pipelines.get(0).getId(), equalTo("_id1")); + assertThat(pipelines.get(1).getId(), equalTo("_id2")); + + // get all variants: (no IDs or '*') + pipelines = IngestService.innerGetPipelines(ingestMetadata); + pipelines.sort(Comparator.comparing(PipelineConfiguration::getId)); + 
assertThat(pipelines.size(), equalTo(2)); + assertThat(pipelines.get(0).getId(), equalTo("_id1")); + assertThat(pipelines.get(1).getId(), equalTo("_id2")); + + pipelines = IngestService.innerGetPipelines(ingestMetadata, "*"); + pipelines.sort(Comparator.comparing(PipelineConfiguration::getId)); + assertThat(pipelines.size(), equalTo(2)); + assertThat(pipelines.get(0).getId(), equalTo("_id1")); + assertThat(pipelines.get(1).getId(), equalTo("_id2")); + } + + public void testValidate() throws Exception { + IngestService ingestService = createWithProcessors(); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray( + "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\", \"tag\": \"tag1\"}}," + + "{\"remove\" : {\"field\": \"_field\", \"tag\": \"tag2\"}}]}"), + XContentType.JSON); + + DiscoveryNode node1 = new DiscoveryNode("_node_id1", buildNewFakeTransportAddress(), + emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode node2 = new DiscoveryNode("_node_id2", buildNewFakeTransportAddress(), + emptyMap(), emptySet(), Version.CURRENT); + Map ingestInfos = new HashMap<>(); + ingestInfos.put(node1, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); + ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set")))); + + ElasticsearchParseException e = + expectThrows(ElasticsearchParseException.class, () -> ingestService.validatePipeline(ingestInfos, putRequest)); + assertEquals("Processor type [remove] is not installed on node [" + node2 + "]", e.getMessage()); + assertEquals("remove", e.getMetadata("es.processor_type").get(0)); + assertEquals("tag2", e.getMetadata("es.processor_tag").get(0)); + + ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); + ingestService.validatePipeline(ingestInfos, putRequest); + } + + public void testExecuteIndexPipelineExistsButFailedParsing() { + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> new AbstractProcessor("mock") { + @Override + public void execute(IngestDocument ingestDocument) { + throw new IllegalStateException("error"); + } + + @Override + public String getType() { + return null; + } + } + )); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + String id = "_id"; + PutPipelineRequest putRequest = new PutPipelineRequest(id, + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final SetOnce failure = new SetOnce<>(); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline(id); + final BiConsumer failureHandler = (request, e) -> { + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(e.getCause().getCause(), instanceOf(IllegalStateException.class)); + assertThat(e.getCause().getCause().getMessage(), equalTo("error")); + failure.set(true); + }; + + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + + assertTrue(failure.get()); + verify(completionHandler, times(1)).accept(null); + } + + public void 
testExecuteBulkPipelineDoesNotExist() { + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> mock(CompoundProcessor.class))); + + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + + BulkRequest bulkRequest = new BulkRequest(); + + IndexRequest indexRequest1 = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + bulkRequest.add(indexRequest1); + IndexRequest indexRequest2 = + new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("does_not_exist"); + bulkRequest.add(indexRequest2); + @SuppressWarnings("unchecked") + BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(bulkRequest.requests(), failureHandler, completionHandler); + verify(failureHandler, times(1)).accept( + argThat(new CustomTypeSafeMatcher("failure handler was not called with the expected arguments") { + @Override + protected boolean matchesSafely(IndexRequest item) { + return item == indexRequest2; + } + + }), + argThat(new CustomTypeSafeMatcher("failure handler was not called with the expected arguments") { + @Override + protected boolean matchesSafely(IllegalArgumentException iae) { + return "pipeline with id [does_not_exist] does not exist".equals(iae.getMessage()); + } + }) + ); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecuteSuccess() { + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> mock(CompoundProcessor.class))); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(failureHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecuteEmptyPipeline() throws Exception { + IngestService ingestService = createWithProcessors(emptyMap()); + PutPipelineRequest putRequest = + new PutPipelineRequest("_id", new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, 
clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(failureHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecutePropagateAllMetaDataUpdates() throws Exception { + final CompoundProcessor processor = mock(CompoundProcessor.class); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> processor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final long newVersion = randomLong(); + final String versionType = randomFrom("internal", "external", "external_gt", "external_gte"); + doAnswer((InvocationOnMock invocationOnMock) -> { + IngestDocument ingestDocument = (IngestDocument) invocationOnMock.getArguments()[0]; + for (IngestDocument.MetaData metaData : IngestDocument.MetaData.values()) { + if (metaData == IngestDocument.MetaData.VERSION) { + ingestDocument.setFieldValue(metaData.getFieldName(), newVersion); + } else if (metaData == IngestDocument.MetaData.VERSION_TYPE) { + ingestDocument.setFieldValue(metaData.getFieldName(), versionType); + } else { + ingestDocument.setFieldValue(metaData.getFieldName(), "update" + metaData.getFieldName()); + } + } + return null; + }).when(processor).execute(any()); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(processor).execute(any()); + verify(failureHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); + assertThat(indexRequest.index(), equalTo("update_index")); + assertThat(indexRequest.type(), equalTo("update_type")); + assertThat(indexRequest.id(), equalTo("update_id")); + assertThat(indexRequest.routing(), equalTo("update_routing")); + assertThat(indexRequest.version(), equalTo(newVersion)); + assertThat(indexRequest.versionType(), equalTo(VersionType.fromString(versionType))); + } + + public void testExecuteFailure() throws Exception { + final CompoundProcessor processor = mock(CompoundProcessor.class); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> processor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new 
ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + doThrow(new RuntimeException()) + .when(processor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecuteSuccessWithOnFailure() throws Exception { + final Processor processor = mock(Processor.class); + when(processor.getType()).thenReturn("mock_processor_type"); + when(processor.getTag()).thenReturn("mock_processor_tag"); + final Processor onFailureProcessor = mock(Processor.class); + final CompoundProcessor compoundProcessor = new CompoundProcessor( + false, Collections.singletonList(processor), Collections.singletonList(new CompoundProcessor(onFailureProcessor))); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> compoundProcessor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + doThrow(new RuntimeException()).when(processor).execute(eqIndexTypeId(emptyMap())); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(failureHandler, never()).accept(eq(indexRequest), any(ElasticsearchException.class)); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecuteFailureWithNestedOnFailure() throws Exception { + final Processor processor = mock(Processor.class); + final Processor onFailureProcessor = mock(Processor.class); + final Processor onFailureOnFailureProcessor = mock(Processor.class); + final List processors = Collections.singletonList(onFailureProcessor); + final List onFailureProcessors = Collections.singletonList(onFailureOnFailureProcessor); + final CompoundProcessor compoundProcessor = new CompoundProcessor( + false, + Collections.singletonList(processor), + Collections.singletonList(new CompoundProcessor(false, processors, onFailureProcessors))); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", 
(factories, tag, config) -> compoundProcessor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + doThrow(new RuntimeException()) + .when(onFailureOnFailureProcessor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + doThrow(new RuntimeException()) + .when(onFailureProcessor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + doThrow(new RuntimeException()) + .when(processor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); + verify(completionHandler, times(1)).accept(null); + } + + public void testBulkRequestExecutionWithFailures() throws Exception { + BulkRequest bulkRequest = new BulkRequest(); + String pipelineId = "_id"; + + int numRequest = scaledRandomIntBetween(8, 64); + int numIndexRequests = 0; + for (int i = 0; i < numRequest; i++) { + DocWriteRequest request; + if (randomBoolean()) { + if (randomBoolean()) { + request = new DeleteRequest("_index", "_type", "_id"); + } else { + request = new UpdateRequest("_index", "_type", "_id"); + } + } else { + IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId); + indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); + request = indexRequest; + numIndexRequests++; + } + bulkRequest.add(request); + } + + CompoundProcessor processor = mock(CompoundProcessor.class); + when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); + Exception error = new RuntimeException(); + doThrow(error).when(processor).execute(any()); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> processor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + + @SuppressWarnings("unchecked") + BiConsumer requestItemErrorHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler); + + verify(requestItemErrorHandler, 
times(numIndexRequests)).accept(any(IndexRequest.class), argThat(new ArgumentMatcher() { + @Override + public boolean matches(final Object o) { + return ((Exception)o).getCause().getCause().equals(error); + } + })); + verify(completionHandler, times(1)).accept(null); + } + + public void testBulkRequestExecution() { + BulkRequest bulkRequest = new BulkRequest(); + String pipelineId = "_id"; + + int numRequest = scaledRandomIntBetween(8, 64); + for (int i = 0; i < numRequest; i++) { + IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId); + indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); + bulkRequest.add(indexRequest); + } + + IngestService ingestService = createWithProcessors(emptyMap()); + PutPipelineRequest putRequest = + new PutPipelineRequest("_id", new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + + @SuppressWarnings("unchecked") + BiConsumer requestItemErrorHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler); + + verify(requestItemErrorHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); + } + + public void testStats() { + final Processor processor = mock(Processor.class); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> processor)); + final IngestStats initialStats = ingestService.stats(); + assertThat(initialStats.getStatsPerPipeline().size(), equalTo(0)); + assertThat(initialStats.getTotalStats().getIngestCount(), equalTo(0L)); + assertThat(initialStats.getTotalStats().getIngestCurrent(), equalTo(0L)); + assertThat(initialStats.getTotalStats().getIngestFailedCount(), equalTo(0L)); + assertThat(initialStats.getTotalStats().getIngestTimeInMillis(), equalTo(0L)); + + PutPipelineRequest putRequest = new PutPipelineRequest("_id1", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + putRequest = new PutPipelineRequest("_id2", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final Map configurationMap = new HashMap<>(); + configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); + configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); + ingestService.updatePipelineStats(new IngestMetadata(configurationMap)); + + @SuppressWarnings("unchecked") final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") final 
Consumer completionHandler = mock(Consumer.class); + + final IndexRequest indexRequest = new IndexRequest("_index"); + indexRequest.setPipeline("_id1"); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + final IngestStats afterFirstRequestStats = ingestService.stats(); + assertThat(afterFirstRequestStats.getStatsPerPipeline().size(), equalTo(2)); + assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); + assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(0L)); + assertThat(afterFirstRequestStats.getTotalStats().getIngestCount(), equalTo(1L)); + + indexRequest.setPipeline("_id2"); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + final IngestStats afterSecondRequestStats = ingestService.stats(); + assertThat(afterSecondRequestStats.getStatsPerPipeline().size(), equalTo(2)); + assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); + assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(1L)); + assertThat(afterSecondRequestStats.getTotalStats().getIngestCount(), equalTo(2L)); + } + + // issue: https://github.com/elastic/elasticsearch/issues/18126 + public void testUpdatingStatsWhenRemovingPipelineWorks() { + IngestService ingestService = createWithProcessors(); + Map configurationMap = new HashMap<>(); + configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); + configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); + ingestService.updatePipelineStats(new IngestMetadata(configurationMap)); + assertThat(ingestService.stats().getStatsPerPipeline(), hasKey("_id1")); + assertThat(ingestService.stats().getStatsPerPipeline(), hasKey("_id2")); + + configurationMap = new HashMap<>(); + configurationMap.put("_id3", new PipelineConfiguration("_id3", new BytesArray("{}"), XContentType.JSON)); + ingestService.updatePipelineStats(new IngestMetadata(configurationMap)); + assertThat(ingestService.stats().getStatsPerPipeline(), not(hasKey("_id1"))); + assertThat(ingestService.stats().getStatsPerPipeline(), not(hasKey("_id2"))); + } + + private IngestDocument eqIndexTypeId(final Map source) { + return argThat(new IngestDocumentMatcher("_index", "_type", "_id", source)); + } + + private IngestDocument eqIndexTypeId(final Long version, final VersionType versionType, final Map source) { + return argThat(new IngestDocumentMatcher("_index", "_type", "_id", version, versionType, source)); + } + + private static IngestService createWithProcessors() { + Map processors = new HashMap<>(); + processors.put("set", (factories, tag, config) -> { + String field = (String) config.remove("field"); + String value = (String) config.remove("value"); + return new Processor() { + @Override + public void execute(IngestDocument ingestDocument) { + ingestDocument.setFieldValue(field, value); + } + + @Override + public String getType() { + return "set"; + } + + @Override + public String getTag() { + return tag; + } + }; + }); + processors.put("remove", (factories, tag, config) -> { + String field = (String) config.remove("field"); + return new Processor() { + @Override + public void execute(IngestDocument ingestDocument) { + ingestDocument.removeField(field); + } + + @Override + public String getType() { + return "remove"; + } + + @Override 
+ public String getTag() { + return tag; + } + }; + }); + return createWithProcessors(processors); + } + + private static IngestService createWithProcessors(Map processors) { + ThreadPool threadPool = mock(ThreadPool.class); + final ExecutorService executorService = EsExecutors.newDirectExecutorService(); + when(threadPool.executor(anyString())).thenReturn(executorService); + return new IngestService(mock(ClusterService.class), threadPool, null, null, + null, Collections.singletonList(new IngestPlugin() { + @Override + public Map getProcessors(final Processor.Parameters parameters) { + return processors; + } + })); + } + + private class IngestDocumentMatcher extends ArgumentMatcher { + + private final IngestDocument ingestDocument; + + IngestDocumentMatcher(String index, String type, String id, Map source) { + this.ingestDocument = new IngestDocument(index, type, id, null, null, null, source); + } + + IngestDocumentMatcher(String index, String type, String id, Long version, VersionType versionType, Map source) { + this.ingestDocument = new IngestDocument(index, type, id, null, version, versionType, source); + } + + @Override + public boolean matches(Object o) { + if (o.getClass() == IngestDocument.class) { + IngestDocument otherIngestDocument = (IngestDocument) o; + //ingest metadata will not be the same (timestamp differs every time) + return Objects.equals(ingestDocument.getSourceAndMetadata(), otherIngestDocument.getSourceAndMetadata()); + } + return false; + } + } } diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java deleted file mode 100644 index 15a23421da26a..0000000000000 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ /dev/null @@ -1,471 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.ingest; - -import org.apache.lucene.util.SetOnce; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.VersionType; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.hamcrest.CustomTypeSafeMatcher; -import org.junit.Before; -import org.mockito.ArgumentMatcher; -import org.mockito.invocation.InvocationOnMock; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ExecutorService; -import java.util.function.BiConsumer; -import java.util.function.Consumer; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.sameInstance; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.argThat; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class PipelineExecutionServiceTests extends ESTestCase { - - private final Integer version = randomBoolean() ? 
randomInt() : null; - private PipelineStore store; - private PipelineExecutionService executionService; - - @Before - public void setup() { - store = mock(PipelineStore.class); - ThreadPool threadPool = mock(ThreadPool.class); - final ExecutorService executorService = EsExecutors.newDirectExecutorService(); - when(threadPool.executor(anyString())).thenReturn(executorService); - executionService = new PipelineExecutionService(store, threadPool); - } - - public void testExecuteIndexPipelineDoesNotExist() { - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - - final SetOnce failure = new SetOnce<>(); - final BiConsumer failureHandler = (request, e) -> { - failure.set(true); - assertThat(request, sameInstance(indexRequest)); - assertThat(e, instanceOf(IllegalArgumentException.class)); - assertThat(e.getMessage(), equalTo("pipeline with id [_id] does not exist")); - }; - - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - - assertTrue(failure.get()); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteIndexPipelineExistsButFailedParsing() { - when(store.get("_id")).thenReturn(new Pipeline("_id", "stub", null, - new CompoundProcessor(new AbstractProcessor("mock") { - @Override - public void execute(IngestDocument ingestDocument) { - throw new IllegalStateException("error"); - } - - @Override - public String getType() { - return null; - } - }))); - - final SetOnce failure = new SetOnce<>(); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - final BiConsumer failureHandler = (request, e) -> { - assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); - assertThat(e.getCause().getCause(), instanceOf(IllegalStateException.class)); - assertThat(e.getCause().getCause().getMessage(), equalTo("error")); - failure.set(true); - }; - - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - - assertTrue(failure.get()); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteBulkPipelineDoesNotExist() { - CompoundProcessor processor = mock(CompoundProcessor.class); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - BulkRequest bulkRequest = new BulkRequest(); - - IndexRequest indexRequest1 = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - bulkRequest.add(indexRequest1); - IndexRequest indexRequest2 = - new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("does_not_exist"); - bulkRequest.add(indexRequest2); - @SuppressWarnings("unchecked") - BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(bulkRequest.requests(), failureHandler, completionHandler); - verify(failureHandler, times(1)).accept( - argThat(new CustomTypeSafeMatcher("failure handler was not called with the expected arguments") { - @Override - protected boolean matchesSafely(IndexRequest item) { - return item == indexRequest2; - } - - }), - argThat(new 
CustomTypeSafeMatcher("failure handler was not called with the expected arguments") { - @Override - protected boolean matchesSafely(IllegalArgumentException iae) { - return "pipeline with id [does_not_exist] does not exist".equals(iae.getMessage()); - } - }) - ); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteSuccess() { - final CompoundProcessor processor = mock(CompoundProcessor.class); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(failureHandler, never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteEmptyPipeline() throws Exception { - final CompoundProcessor processor = mock(CompoundProcessor.class); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - when(processor.getProcessors()).thenReturn(Collections.emptyList()); - - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor, never()).execute(any()); - verify(failureHandler, never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecutePropagateAllMetaDataUpdates() throws Exception { - final CompoundProcessor processor = mock(CompoundProcessor.class); - when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); - final long newVersion = randomLong(); - final String versionType = randomFrom("internal", "external", "external_gt", "external_gte"); - doAnswer((InvocationOnMock invocationOnMock) -> { - IngestDocument ingestDocument = (IngestDocument) invocationOnMock.getArguments()[0]; - for (IngestDocument.MetaData metaData : IngestDocument.MetaData.values()) { - if (metaData == IngestDocument.MetaData.VERSION) { - ingestDocument.setFieldValue(metaData.getFieldName(), newVersion); - } else if (metaData == IngestDocument.MetaData.VERSION_TYPE) { - ingestDocument.setFieldValue(metaData.getFieldName(), versionType); - } else { - ingestDocument.setFieldValue(metaData.getFieldName(), "update" + metaData.getFieldName()); - } - } - return null; - }).when(processor).execute(any()); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor).execute(any()); - verify(failureHandler, 
never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(null); - assertThat(indexRequest.index(), equalTo("update_index")); - assertThat(indexRequest.type(), equalTo("update_type")); - assertThat(indexRequest.id(), equalTo("update_id")); - assertThat(indexRequest.routing(), equalTo("update_routing")); - assertThat(indexRequest.version(), equalTo(newVersion)); - assertThat(indexRequest.versionType(), equalTo(VersionType.fromString(versionType))); - } - - public void testExecuteFailure() throws Exception { - final CompoundProcessor processor = mock(CompoundProcessor.class); - when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()) - .when(processor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteSuccessWithOnFailure() throws Exception { - final Processor processor = mock(Processor.class); - when(processor.getType()).thenReturn("mock_processor_type"); - when(processor.getTag()).thenReturn("mock_processor_tag"); - final Processor onFailureProcessor = mock(Processor.class); - final CompoundProcessor compoundProcessor = new CompoundProcessor( - false, Collections.singletonList(processor), Collections.singletonList(new CompoundProcessor(onFailureProcessor))); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()).when(processor).execute(eqIndexTypeId(Collections.emptyMap())); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(failureHandler, never()).accept(eq(indexRequest), any(ElasticsearchException.class)); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteFailureWithOnFailure() throws Exception { - final Processor processor = mock(Processor.class); - final Processor onFailureProcessor = mock(Processor.class); - final CompoundProcessor compoundProcessor = new CompoundProcessor( - false, Collections.singletonList(processor), Collections.singletonList(new CompoundProcessor(onFailureProcessor))); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()) 
- .when(processor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - doThrow(new RuntimeException()) - .when(onFailureProcessor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteFailureWithNestedOnFailure() throws Exception { - final Processor processor = mock(Processor.class); - final Processor onFailureProcessor = mock(Processor.class); - final Processor onFailureOnFailureProcessor = mock(Processor.class); - final List processors = Collections.singletonList(onFailureProcessor); - final List onFailureProcessors = Collections.singletonList(onFailureOnFailureProcessor); - final CompoundProcessor compoundProcessor = new CompoundProcessor( - false, - Collections.singletonList(processor), - Collections.singletonList(new CompoundProcessor(false, processors, onFailureProcessors))); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()) - .when(onFailureOnFailureProcessor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - doThrow(new RuntimeException()) - .when(onFailureProcessor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - doThrow(new RuntimeException()) - .when(processor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); - verify(completionHandler, times(1)).accept(null); - } - - public void testBulkRequestExecutionWithFailures() throws Exception { - BulkRequest bulkRequest = new BulkRequest(); - String pipelineId = "_id"; - - int numRequest = scaledRandomIntBetween(8, 64); - int numIndexRequests = 0; - for (int i = 0; i < numRequest; i++) { - DocWriteRequest request; - if (randomBoolean()) { - if (randomBoolean()) { - request = new DeleteRequest("_index", "_type", "_id"); - } else { - request = new UpdateRequest("_index", "_type", "_id"); - } - } else { - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId); - indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); - request = indexRequest; - numIndexRequests++; - } - bulkRequest.add(request); - } - - CompoundProcessor 
processor = mock(CompoundProcessor.class); - when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); - Exception error = new RuntimeException(); - doThrow(error).when(processor).execute(any()); - when(store.get(pipelineId)).thenReturn(new Pipeline(pipelineId, null, version, processor)); - - @SuppressWarnings("unchecked") - BiConsumer requestItemErrorHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler); - - verify(requestItemErrorHandler, times(numIndexRequests)).accept(any(IndexRequest.class), eq(error)); - verify(completionHandler, times(1)).accept(null); - } - - public void testBulkRequestExecution() { - BulkRequest bulkRequest = new BulkRequest(); - String pipelineId = "_id"; - - int numRequest = scaledRandomIntBetween(8, 64); - for (int i = 0; i < numRequest; i++) { - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId); - indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); - bulkRequest.add(indexRequest); - } - - when(store.get(pipelineId)).thenReturn(new Pipeline(pipelineId, null, version, new CompoundProcessor())); - - @SuppressWarnings("unchecked") - BiConsumer requestItemErrorHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler); - - verify(requestItemErrorHandler, never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(null); - } - - public void testStats() { - final IngestStats initialStats = executionService.stats(); - assertThat(initialStats.getStatsPerPipeline().size(), equalTo(0)); - assertThat(initialStats.getTotalStats().getIngestCount(), equalTo(0L)); - assertThat(initialStats.getTotalStats().getIngestCurrent(), equalTo(0L)); - assertThat(initialStats.getTotalStats().getIngestFailedCount(), equalTo(0L)); - assertThat(initialStats.getTotalStats().getIngestTimeInMillis(), equalTo(0L)); - - when(store.get("_id1")).thenReturn(new Pipeline("_id1", null, version, new CompoundProcessor(mock(Processor.class)))); - when(store.get("_id2")).thenReturn(new Pipeline("_id2", null, null, new CompoundProcessor(mock(Processor.class)))); - - final Map configurationMap = new HashMap<>(); - configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); - configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); - executionService.updatePipelineStats(new IngestMetadata(configurationMap)); - - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - - final IndexRequest indexRequest = new IndexRequest("_index"); - indexRequest.setPipeline("_id1"); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - final IngestStats afterFirstRequestStats = executionService.stats(); - assertThat(afterFirstRequestStats.getStatsPerPipeline().size(), equalTo(2)); - assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); - assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(0L)); - 
assertThat(afterFirstRequestStats.getTotalStats().getIngestCount(), equalTo(1L)); - - indexRequest.setPipeline("_id2"); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - final IngestStats afterSecondRequestStats = executionService.stats(); - assertThat(afterSecondRequestStats.getStatsPerPipeline().size(), equalTo(2)); - assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); - assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(1L)); - assertThat(afterSecondRequestStats.getTotalStats().getIngestCount(), equalTo(2L)); - } - - // issue: https://github.com/elastic/elasticsearch/issues/18126 - public void testUpdatingStatsWhenRemovingPipelineWorks() { - Map configurationMap = new HashMap<>(); - configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); - configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); - executionService.updatePipelineStats(new IngestMetadata(configurationMap)); - assertThat(executionService.stats().getStatsPerPipeline(), hasKey("_id1")); - assertThat(executionService.stats().getStatsPerPipeline(), hasKey("_id2")); - - configurationMap = new HashMap<>(); - configurationMap.put("_id3", new PipelineConfiguration("_id3", new BytesArray("{}"), XContentType.JSON)); - executionService.updatePipelineStats(new IngestMetadata(configurationMap)); - assertThat(executionService.stats().getStatsPerPipeline(), not(hasKey("_id1"))); - assertThat(executionService.stats().getStatsPerPipeline(), not(hasKey("_id2"))); - } - - private IngestDocument eqIndexTypeId(final Map source) { - return argThat(new IngestDocumentMatcher("_index", "_type", "_id", source)); - } - - private IngestDocument eqIndexTypeId(final Long version, final VersionType versionType, final Map source) { - return argThat(new IngestDocumentMatcher("_index", "_type", "_id", version, versionType, source)); - } - - private class IngestDocumentMatcher extends ArgumentMatcher { - - private final IngestDocument ingestDocument; - - IngestDocumentMatcher(String index, String type, String id, Map source) { - this.ingestDocument = new IngestDocument(index, type, id, null, null, null, source); - } - - IngestDocumentMatcher(String index, String type, String id, Long version, VersionType versionType, Map source) { - this.ingestDocument = new IngestDocument(index, type, id, null, version, versionType, source); - } - - @Override - public boolean matches(Object o) { - if (o.getClass() == IngestDocument.class) { - IngestDocument otherIngestDocument = (IngestDocument) o; - //ingest metadata will not be the same (timestamp differs every time) - return Objects.equals(ingestDocument.getSourceAndMetadata(), otherIngestDocument.getSourceAndMetadata()); - } - return false; - } - } -} diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java index 461873a3fe3d2..d6d7b4ffa816b 100644 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; @@ -32,11 +33,13 @@ import static 
org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; public class PipelineFactoryTests extends ESTestCase { private final Integer version = randomBoolean() ? randomInt() : null; private final String versionString = version != null ? Integer.toString(version) : null; + private final ScriptService scriptService = mock(ScriptService.class); public void testCreate() throws Exception { Map processorConfig0 = new HashMap<>(); @@ -47,9 +50,8 @@ public void testCreate() throws Exception { pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Arrays.asList(Collections.singletonMap("test", processorConfig0), Collections.singletonMap("test", processorConfig1))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -64,9 +66,8 @@ public void testCreateWithNoProcessorsField() throws Exception { Map pipelineConfig = new HashMap<>(); pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); - Pipeline.Factory factory = new Pipeline.Factory(); try { - factory.create("_id", pipelineConfig, Collections.emptyMap()); + Pipeline.create("_id", pipelineConfig, Collections.emptyMap(), scriptService); fail("should fail, missing required [processors] field"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[processors] required property is missing")); @@ -78,8 +79,7 @@ public void testCreateWithEmptyProcessorsField() throws Exception { pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.emptyList()); - Pipeline.Factory factory = new Pipeline.Factory(); - Pipeline pipeline = factory.create("_id", pipelineConfig, null); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, null, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -93,9 +93,8 @@ public void testCreateWithPipelineOnFailure() throws Exception { pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); pipelineConfig.put(Pipeline.ON_FAILURE_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -112,9 +111,11 @@ public void testCreateWithPipelineEmptyOnFailure() throws Exception { pipelineConfig.put(Pipeline.VERSION_KEY, 
versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); pipelineConfig.put(Pipeline.ON_FAILURE_KEY, Collections.emptyList()); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create("_id", pipelineConfig, processorRegistry)); + Exception e = expectThrows( + ElasticsearchParseException.class, + () -> Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService) + ); assertThat(e.getMessage(), equalTo("pipeline [_id] cannot have an empty on_failure option defined")); } @@ -125,9 +126,11 @@ public void testCreateWithPipelineEmptyOnFailureInProcessor() throws Exception { pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create("_id", pipelineConfig, processorRegistry)); + Exception e = expectThrows( + ElasticsearchParseException.class, + () -> Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService) + ); assertThat(e.getMessage(), equalTo("[on_failure] processors list cannot be empty")); } @@ -136,14 +139,13 @@ public void testCreateWithPipelineIgnoreFailure() throws Exception { processorConfig.put("ignore_failure", true); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline.Factory factory = new Pipeline.Factory(); Map pipelineConfig = new HashMap<>(); pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -162,9 +164,11 @@ public void testCreateUnusedProcessorOptions() throws Exception { pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create("_id", pipelineConfig, processorRegistry)); + Exception e = expectThrows( + ElasticsearchParseException.class, + () -> Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService) + ); assertThat(e.getMessage(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]")); } @@ -176,9 +180,8 @@ public void testCreateProcessorsWithOnFailureProperties() throws Exception { pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); 
pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java deleted file mode 100644 index d0ce465fc9ef8..0000000000000 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java +++ /dev/null @@ -1,377 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
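The PipelineFactoryTests hunks above all make the same substitution: the Pipeline.Factory instance is gone and pipelines are built through the static Pipeline.create method, which now also takes a ScriptService (mocked in these tests). A minimal sketch of the new call shape; only the Pipeline.create(id, config, registry, scriptService) signature comes from the diff, the config values and registry entry are illustrative placeholders:

ScriptService scriptService = mock(ScriptService.class);

Map<String, Object> pipelineConfig = new HashMap<>();
pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description");
pipelineConfig.put(Pipeline.PROCESSORS_KEY,
        Collections.singletonList(Collections.singletonMap("test", new HashMap<String, Object>())));

Map<String, Processor.Factory> processorRegistry =
        Collections.singletonMap("test", new TestProcessor.Factory());

// before: new Pipeline.Factory().create("_id", pipelineConfig, processorRegistry)
Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService);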
- */ - -package org.elasticsearch.ingest; - -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.Version; -import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -public class PipelineStoreTests extends ESTestCase { - - private PipelineStore store; - - @Before - public void init() throws Exception { - Map processorFactories = new HashMap<>(); - processorFactories.put("set", (factories, tag, config) -> { - String field = (String) config.remove("field"); - String value = (String) config.remove("value"); - return new Processor() { - @Override - public void execute(IngestDocument ingestDocument) throws Exception { - ingestDocument.setFieldValue(field, value); - } - - @Override - public String getType() { - return "set"; - } - - @Override - public String getTag() { - return tag; - } - }; - }); - processorFactories.put("remove", (factories, tag, config) -> { - String field = (String) config.remove("field"); - return new Processor() { - @Override - public void execute(IngestDocument ingestDocument) throws Exception { - ingestDocument.removeField(field); - } - - @Override - public String getType() { - return "remove"; - } - - @Override - public String getTag() { - return tag; - } - }; - }); - store = new PipelineStore(Settings.EMPTY, processorFactories); - } - - public void testUpdatePipelines() { - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - ClusterState previousClusterState = clusterState; - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.pipelines.size(), is(0)); - - PipelineConfiguration pipeline = new PipelineConfiguration( - "_id",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON - ); - IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", pipeline)); - clusterState = ClusterState.builder(clusterState) - .metaData(MetaData.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) - .build(); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.pipelines.size(), is(1)); - assertThat(store.pipelines.get("_id").getId(), equalTo("_id")); - assertThat(store.pipelines.get("_id").getDescription(), nullValue()); - assertThat(store.pipelines.get("_id").getProcessors().size(), equalTo(1)); - assertThat(store.pipelines.get("_id").getProcessors().get(0).getType(), equalTo("set")); - } - - public void testPut() { - String id = "_id"; - Pipeline pipeline = store.get(id); - assertThat(pipeline, nullValue()); - ClusterState 
clusterState = ClusterState.builder(new ClusterName("_name")).build(); - - // add a new pipeline: - PutPipelineRequest putRequest = new PutPipelineRequest(id, new BytesArray("{\"processors\": []}"), XContentType.JSON); - ClusterState previousClusterState = clusterState; - clusterState = store.innerPut(putRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - pipeline = store.get(id); - assertThat(pipeline, notNullValue()); - assertThat(pipeline.getId(), equalTo(id)); - assertThat(pipeline.getDescription(), nullValue()); - assertThat(pipeline.getProcessors().size(), equalTo(0)); - - // overwrite existing pipeline: - putRequest = - new PutPipelineRequest(id, new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), XContentType.JSON); - previousClusterState = clusterState; - clusterState = store.innerPut(putRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - pipeline = store.get(id); - assertThat(pipeline, notNullValue()); - assertThat(pipeline.getId(), equalTo(id)); - assertThat(pipeline.getDescription(), equalTo("_description")); - assertThat(pipeline.getProcessors().size(), equalTo(0)); - } - - public void testPutWithErrorResponse() { - String id = "_id"; - Pipeline pipeline = store.get(id); - assertThat(pipeline, nullValue()); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - - PutPipelineRequest putRequest = - new PutPipelineRequest(id, new BytesArray("{\"description\": \"empty processors\"}"), XContentType.JSON); - ClusterState previousClusterState = clusterState; - clusterState = store.innerPut(putRequest, clusterState); - try { - store.innerUpdatePipelines(previousClusterState, clusterState); - fail("should fail"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), equalTo("[processors] required property is missing")); - } - pipeline = store.get(id); - assertNotNull(pipeline); - assertThat(pipeline.getId(), equalTo("_id")); - assertThat(pipeline.getDescription(), equalTo("this is a place holder pipeline, because pipeline with" + - " id [_id] could not be loaded")); - assertThat(pipeline.getProcessors().size(), equalTo(1)); - assertNull(pipeline.getProcessors().get(0).getTag()); - assertThat(pipeline.getProcessors().get(0).getType(), equalTo("unknown")); - } - - public void testDelete() { - PipelineConfiguration config = new PipelineConfiguration( - "_id",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON - ); - IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", config)); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - ClusterState previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() - .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("_id"), notNullValue()); - - // Delete pipeline: - DeletePipelineRequest deleteRequest = new DeletePipelineRequest("_id"); - previousClusterState = clusterState; - clusterState = store.innerDelete(deleteRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("_id"), nullValue()); - - // Delete existing pipeline: - try { - store.innerDelete(deleteRequest, clusterState); - fail("exception expected"); - } catch (ResourceNotFoundException e) { - 
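The deleted PipelineStoreTests drive pipeline CRUD purely through cluster-state transitions: innerPut and innerDelete only compute a new ClusterState, and nothing becomes visible to get until innerUpdatePipelines(previous, current) applies the change. A condensed sketch of that round trip, lifted from the deleted testCrud flow rather than from any surviving API; `store` is the PipelineStore built in the deleted init() method:

ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build();

PutPipelineRequest putRequest = new PutPipelineRequest("_id",
        new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"),
        XContentType.JSON);
ClusterState previous = clusterState;
clusterState = store.innerPut(putRequest, clusterState);   // computes the new cluster state only
store.innerUpdatePipelines(previous, clusterState);        // applies it, making the pipeline visible
// store.get("_id") is now non-null

DeletePipelineRequest deleteRequest = new DeletePipelineRequest("_id");
previous = clusterState;
clusterState = store.innerDelete(deleteRequest, clusterState);
store.innerUpdatePipelines(previous, clusterState);
// store.get("_id") is null again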
assertThat(e.getMessage(), equalTo("pipeline [_id] is missing")); - } - } - - public void testDeleteUsingWildcard() { - HashMap pipelines = new HashMap<>(); - BytesArray definition = new BytesArray( - "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}" - ); - pipelines.put("p1", new PipelineConfiguration("p1", definition, XContentType.JSON)); - pipelines.put("p2", new PipelineConfiguration("p2", definition, XContentType.JSON)); - pipelines.put("q1", new PipelineConfiguration("q1", definition, XContentType.JSON)); - IngestMetadata ingestMetadata = new IngestMetadata(pipelines); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - ClusterState previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() - .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("p1"), notNullValue()); - assertThat(store.get("p2"), notNullValue()); - assertThat(store.get("q1"), notNullValue()); - - // Delete pipeline matching wildcard - DeletePipelineRequest deleteRequest = new DeletePipelineRequest("p*"); - previousClusterState = clusterState; - clusterState = store.innerDelete(deleteRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("p1"), nullValue()); - assertThat(store.get("p2"), nullValue()); - assertThat(store.get("q1"), notNullValue()); - - // Exception if we used name which does not exist - try { - store.innerDelete(new DeletePipelineRequest("unknown"), clusterState); - fail("exception expected"); - } catch (ResourceNotFoundException e) { - assertThat(e.getMessage(), equalTo("pipeline [unknown] is missing")); - } - - // match all wildcard works on last remaining pipeline - DeletePipelineRequest matchAllDeleteRequest = new DeletePipelineRequest("*"); - previousClusterState = clusterState; - clusterState = store.innerDelete(matchAllDeleteRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("p1"), nullValue()); - assertThat(store.get("p2"), nullValue()); - assertThat(store.get("q1"), nullValue()); - - // match all wildcard does not throw exception if none match - store.innerDelete(matchAllDeleteRequest, clusterState); - } - - public void testDeleteWithExistingUnmatchedPipelines() { - HashMap pipelines = new HashMap<>(); - BytesArray definition = new BytesArray( - "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}" - ); - pipelines.put("p1", new PipelineConfiguration("p1", definition, XContentType.JSON)); - IngestMetadata ingestMetadata = new IngestMetadata(pipelines); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - ClusterState previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() - .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("p1"), notNullValue()); - - DeletePipelineRequest deleteRequest = new DeletePipelineRequest("z*"); - try { - store.innerDelete(deleteRequest, clusterState); - fail("exception expected"); - } catch (ResourceNotFoundException e) { - assertThat(e.getMessage(), equalTo("pipeline [z*] is missing")); - } - } - - public void testGetPipelines() { - Map configs = new HashMap<>(); - configs.put("_id1", new PipelineConfiguration( - 
"_id1", new BytesArray("{\"processors\": []}"), XContentType.JSON - )); - configs.put("_id2", new PipelineConfiguration( - "_id2", new BytesArray("{\"processors\": []}"), XContentType.JSON - )); - - assertThat(store.innerGetPipelines(null, "_id1").isEmpty(), is(true)); - - IngestMetadata ingestMetadata = new IngestMetadata(configs); - List pipelines = store.innerGetPipelines(ingestMetadata, "_id1"); - assertThat(pipelines.size(), equalTo(1)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - - pipelines = store.innerGetPipelines(ingestMetadata, "_id1", "_id2"); - assertThat(pipelines.size(), equalTo(2)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - assertThat(pipelines.get(1).getId(), equalTo("_id2")); - - pipelines = store.innerGetPipelines(ingestMetadata, "_id*"); - pipelines.sort((o1, o2) -> o1.getId().compareTo(o2.getId())); - assertThat(pipelines.size(), equalTo(2)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - assertThat(pipelines.get(1).getId(), equalTo("_id2")); - - // get all variants: (no IDs or '*') - pipelines = store.innerGetPipelines(ingestMetadata); - pipelines.sort((o1, o2) -> o1.getId().compareTo(o2.getId())); - assertThat(pipelines.size(), equalTo(2)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - assertThat(pipelines.get(1).getId(), equalTo("_id2")); - - pipelines = store.innerGetPipelines(ingestMetadata, "*"); - pipelines.sort((o1, o2) -> o1.getId().compareTo(o2.getId())); - assertThat(pipelines.size(), equalTo(2)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - assertThat(pipelines.get(1).getId(), equalTo("_id2")); - } - - public void testCrud() throws Exception { - String id = "_id"; - Pipeline pipeline = store.get(id); - assertThat(pipeline, nullValue()); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty - - PutPipelineRequest putRequest = new PutPipelineRequest(id, - new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON); - ClusterState previousClusterState = clusterState; - clusterState = store.innerPut(putRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - pipeline = store.get(id); - assertThat(pipeline, notNullValue()); - assertThat(pipeline.getId(), equalTo(id)); - assertThat(pipeline.getDescription(), nullValue()); - assertThat(pipeline.getProcessors().size(), equalTo(1)); - assertThat(pipeline.getProcessors().get(0).getType(), equalTo("set")); - - DeletePipelineRequest deleteRequest = new DeletePipelineRequest(id); - previousClusterState = clusterState; - clusterState = store.innerDelete(deleteRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - pipeline = store.get(id); - assertThat(pipeline, nullValue()); - } - - public void testValidate() throws Exception { - PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray( - "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\", \"tag\": \"tag1\"}}," + - "{\"remove\" : {\"field\": \"_field\", \"tag\": \"tag2\"}}]}"), - XContentType.JSON); - - DiscoveryNode node1 = new DiscoveryNode("_node_id1", buildNewFakeTransportAddress(), - emptyMap(), emptySet(), Version.CURRENT); - DiscoveryNode node2 = new DiscoveryNode("_node_id2", buildNewFakeTransportAddress(), - emptyMap(), emptySet(), Version.CURRENT); - Map ingestInfos = new HashMap<>(); - ingestInfos.put(node1, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new 
ProcessorInfo("remove")))); - ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set")))); - - ElasticsearchParseException e = - expectThrows(ElasticsearchParseException.class, () -> store.validatePipeline(ingestInfos, putRequest)); - assertEquals("Processor type [remove] is not installed on node [" + node2 + "]", e.getMessage()); - assertEquals("remove", e.getMetadata("es.processor_type").get(0)); - assertEquals("tag2", e.getMetadata("es.processor_tag").get(0)); - - ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); - store.validatePipeline(ingestInfos, putRequest); - } - - public void testValidateNoIngestInfo() throws Exception { - PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray( - "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON); - Exception e = expectThrows(IllegalStateException.class, () -> store.validatePipeline(Collections.emptyMap(), putRequest)); - assertEquals("Ingest info is empty", e.getMessage()); - - DiscoveryNode discoveryNode = new DiscoveryNode("_node_id", buildNewFakeTransportAddress(), - emptyMap(), emptySet(), Version.CURRENT); - IngestInfo ingestInfo = new IngestInfo(Collections.singletonList(new ProcessorInfo("set"))); - store.validatePipeline(Collections.singletonMap(discoveryNode, ingestInfo), putRequest); - } -} diff --git a/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java b/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java index c53d798f7b488..d413c0f0be229 100644 --- a/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.node.MockNode; @@ -32,6 +33,7 @@ import java.util.function.Function; import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasToString; public class IndexStorePluginTests extends ESTestCase { @@ -54,7 +56,30 @@ public Map> getIndexStoreFactories() } - public void testDuplicateIndexStoreProviders() { + public static class ConflictingStorePlugin extends Plugin implements IndexStorePlugin { + + public static final String TYPE; + + static { + TYPE = randomFrom(Arrays.asList(IndexModule.Type.values())).getSettingsKey(); + } + + @Override + public Map> getIndexStoreFactories() { + return Collections.singletonMap(TYPE, IndexStore::new); + } + + } + + public void testIndexStoreFactoryConflictsWithBuiltInIndexStoreType() { + final Settings settings = Settings.builder().put("path.home", createTempDir()).build(); + final IllegalStateException e = expectThrows( + IllegalStateException.class, () -> new MockNode(settings, Collections.singletonList(ConflictingStorePlugin.class))); + assertThat(e, hasToString(containsString( + "registered index store type [" + ConflictingStorePlugin.TYPE + "] conflicts with a built-in type"))); + } + + public void testDuplicateIndexStoreFactories() { final Settings settings = Settings.builder().put("path.home", createTempDir()).build(); final IllegalStateException e = expectThrows( IllegalStateException.class, () -> new MockNode(settings, 
Arrays.asList(BarStorePlugin.class, FooStorePlugin.class))); diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 5f1d1f612d7ad..f6649853eda10 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -590,10 +590,10 @@ public void testNonExtensibleDep() throws Exception { } public void testIncompatibleElasticsearchVersion() throws Exception { - PluginInfo info = new PluginInfo("my_plugin", "desc", "1.0", Version.V_5_0_0, + PluginInfo info = new PluginInfo("my_plugin", "desc", "1.0", Version.V_6_0_0, "1.8", "FakePlugin", Collections.emptyList(), false); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginsService.verifyCompatibility(info)); - assertThat(e.getMessage(), containsString("was built for Elasticsearch version 5.0.0")); + assertThat(e.getMessage(), containsString("was built for Elasticsearch version 6.0.0")); } public void testIncompatibleJavaVersion() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java new file mode 100644 index 0000000000000..29b19739e7587 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
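The new testIndexStoreFactoryConflictsWithBuiltInIndexStoreType case above asserts that node construction fails when a plugin registers an index store factory under a key that collides with a built-in index.store.type. A sketch of such a conflicting plugin, modelled on the ConflictingStorePlugin added in that hunk; the generic signature of getIndexStoreFactories and the use of IndexModule.Type.FS as the colliding key are assumptions, not taken from the diff:

// Hypothetical plugin whose store type deliberately collides with a built-in one.
public class MyConflictingStorePlugin extends Plugin implements IndexStorePlugin {

    // Assumption: FS is one of the built-in IndexModule.Type values.
    public static final String TYPE = IndexModule.Type.FS.getSettingsKey();

    @Override
    public Map<String, Function<IndexSettings, IndexStore>> getIndexStoreFactories() {
        return Collections.singletonMap(TYPE, IndexStore::new);
    }
}

// Expected at node construction time, per the test above:
// IllegalStateException: registered index store type [...] conflicts with a built-in type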
+ */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + + +public class RestClusterGetSettingsActionTests extends ESTestCase { + + public void testFilterPersistentSettings() { + runTestFilterSettingsTest(MetaData.Builder::persistentSettings, ClusterGetSettingsResponse::getPersistentSettings); + } + + public void testFilterTransientSettings() { + runTestFilterSettingsTest(MetaData.Builder::transientSettings, ClusterGetSettingsResponse::getTransientSettings); + } + + private void runTestFilterSettingsTest( + final BiConsumer md, final Function s) { + final MetaData.Builder mdBuilder = new MetaData.Builder(); + final Settings settings = Settings.builder().put("foo.filtered", "bar").put("foo.non_filtered", "baz").build(); + md.accept(mdBuilder, settings); + final ClusterState.Builder builder = new ClusterState.Builder(ClusterState.EMPTY_STATE).metaData(mdBuilder); + final SettingsFilter filter = new SettingsFilter(Settings.EMPTY, Collections.singleton("foo.filtered")); + final Setting.Property[] properties = {Setting.Property.Dynamic, Setting.Property.Filtered, Setting.Property.NodeScope}; + final Set> settingsSet = Stream.concat( + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), + Stream.concat( + Stream.of(Setting.simpleString("foo.filtered", properties)), + Stream.of(Setting.simpleString("foo.non_filtered", properties)))) + .collect(Collectors.toSet()); + final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, settingsSet); + final ClusterGetSettingsResponse response = + RestClusterGetSettingsAction.response(builder.build(), randomBoolean(), filter, clusterSettings, Settings.EMPTY); + assertFalse(s.apply(response).hasValue("foo.filtered")); + assertTrue(s.apply(response).hasValue("foo.non_filtered")); + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/search/SearchRequestTests.java index 95a9ae9d707d4..36d2ef2c4db3b 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchRequestTests.java @@ -28,7 +28,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.ArrayUtils; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.rescore.QueryRescorerBuilder; import java.io.IOException; import java.util.ArrayList; @@ -123,6 +125,17 @@ public void testValidate() throws IOException { assertEquals(1, validationErrors.validationErrors().size()); assertEquals("[size] cannot be [0] in a scroll context", validationErrors.validationErrors().get(0)); } + { + // Rescore is not allowed on scroll requests + SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder()); + 
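The new RestClusterGetSettingsActionTests above checks that settings marked with the Filtered property are stripped from the cluster-get-settings response by the SettingsFilter. A small standalone sketch of the filtering behaviour the test relies on, assuming SettingsFilter exposes a filter(Settings) method; the setting names are the same illustrative ones used in the test:

// "foo.filtered" is registered as a filtered pattern, so it should not survive filtering.
Settings settings = Settings.builder()
        .put("foo.filtered", "bar")
        .put("foo.non_filtered", "baz")
        .build();

SettingsFilter filter = new SettingsFilter(Settings.EMPTY, Collections.singleton("foo.filtered"));
Settings visible = filter.filter(settings);

// visible.hasValue("foo.non_filtered") -> true
// visible.hasValue("foo.filtered")     -> false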
searchRequest.source().addRescorer(new QueryRescorerBuilder(QueryBuilders.matchAllQuery())); + searchRequest.requestCache(false); + searchRequest.scroll(new TimeValue(1000)); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals("using [rescore] is not allowed in a scroll context", validationErrors.validationErrors().get(0)); + } } public void testEqualsAndHashcode() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BucketUtilsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BucketUtilsTests.java index aa9068b651e9b..35f3175f7cfe5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BucketUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BucketUtilsTests.java @@ -27,18 +27,14 @@ public class BucketUtilsTests extends ESTestCase { public void testBadInput() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> BucketUtils.suggestShardSideQueueSize(0, 10)); + () -> BucketUtils.suggestShardSideQueueSize(0, randomBoolean())); assertEquals(e.getMessage(), "size must be positive, got 0"); - - e = expectThrows(IllegalArgumentException.class, - () -> BucketUtils.suggestShardSideQueueSize(10, 0)); - assertEquals(e.getMessage(), "number of shards must be positive, got 0"); } public void testOptimizesSingleShard() { for (int iter = 0; iter < 10; ++iter) { final int size = randomIntBetween(1, Integer.MAX_VALUE); - assertEquals(size, BucketUtils.suggestShardSideQueueSize( size, 1)); + assertEquals(size, BucketUtils.suggestShardSideQueueSize( size, true)); } } @@ -46,7 +42,7 @@ public void testOverFlow() { for (int iter = 0; iter < 10; ++iter) { final int size = Integer.MAX_VALUE - randomInt(10); final int numberOfShards = randomIntBetween(1, 10); - final int shardSize = BucketUtils.suggestShardSideQueueSize( size, numberOfShards); + final int shardSize = BucketUtils.suggestShardSideQueueSize( size, numberOfShards == 1); assertThat(shardSize, greaterThanOrEqualTo(shardSize)); } } @@ -55,7 +51,7 @@ public void testShardSizeIsGreaterThanGlobalSize() { for (int iter = 0; iter < 10; ++iter) { final int size = randomIntBetween(1, Integer.MAX_VALUE); final int numberOfShards = randomIntBetween(1, 10); - final int shardSize = BucketUtils.suggestShardSideQueueSize( size, numberOfShards); + final int shardSize = BucketUtils.suggestShardSideQueueSize( size, numberOfShards == 1); assertThat(shardSize, greaterThanOrEqualTo(size)); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java index 327a717f05c52..bdfdd4d028f0f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java @@ -178,4 +178,18 @@ public void testRewrite() throws IOException { assertSame(rewritten, rewritten.rewrite(new QueryRewriteContext(xContentRegistry(), null, null, () -> 0L))); } + + public void testRewritePreservesOtherBucket() throws IOException { + FiltersAggregationBuilder originalFilters = new FiltersAggregationBuilder("my-agg", new BoolQueryBuilder()); + originalFilters.otherBucket(randomBoolean()); + originalFilters.otherBucketKey(randomAlphaOfLength(10)); + + 
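The SearchRequestTests addition just above pins down a new validation rule: a scroll request that also defines a rescore section must be rejected. A sketch of the request shape that triggers it, assuming a plain SearchRequest built against an arbitrary index name; the builder calls themselves are the ones used in the test:

// Scroll plus rescore is the invalid combination covered by the new validation test.
SearchRequest searchRequest = new SearchRequest("idx").source(new SearchSourceBuilder());
searchRequest.source().addRescorer(new QueryRescorerBuilder(QueryBuilders.matchAllQuery()));
searchRequest.scroll(new TimeValue(1000));

ActionRequestValidationException errors = searchRequest.validate();
// errors is non-null and contains:
// "using [rescore] is not allowed in a scroll context"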
AggregationBuilder rewritten = originalFilters.rewrite(new QueryRewriteContext(xContentRegistry(), + null, null, () -> 0L)); + assertThat(rewritten, instanceOf(FiltersAggregationBuilder.class)); + + FiltersAggregationBuilder rewrittenFilters = (FiltersAggregationBuilder) rewritten; + assertEquals(originalFilters.otherBucket(), rewrittenFilters.otherBucket()); + assertEquals(originalFilters.otherBucketKey(), rewrittenFilters.otherBucketKey()); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index c50fb89f334af..ce45d222dd757 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -67,7 +67,7 @@ protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created } - private Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + private Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); private IndexRequestBuilder indexCity(String idx, String name, String... latLons) throws Exception { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index fc080dd0f04c4..971742aec2d04 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -65,7 +65,7 @@ protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created } - private Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + private Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); static ObjectIntMap expectedDocCountsForGeoHash = null; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java index 981d263d7d633..b7c5bf03ac552 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.rounding.DateTimeUnit; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; @@ -28,7 +29,11 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -39,6 +44,8 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueHours; import static 
org.elasticsearch.common.unit.TimeValue.timeValueMinutes; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; +import static org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.createRounding; +import static org.hamcrest.Matchers.equalTo; public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregationTestCase { @@ -61,6 +68,7 @@ protected InternalAutoDateHistogram createTestInstance(String name, int nbBuckets = randomNumberOfBuckets(); int targetBuckets = randomIntBetween(1, nbBuckets * 2 + 1); List buckets = new ArrayList<>(nbBuckets); + long startingDate = System.currentTimeMillis(); long interval = randomIntBetween(1, 3); @@ -72,23 +80,41 @@ protected InternalAutoDateHistogram createTestInstance(String name, } InternalAggregations subAggregations = new InternalAggregations(Collections.emptyList()); BucketInfo bucketInfo = new BucketInfo(roundingInfos, randomIntBetween(0, roundingInfos.length - 1), subAggregations); + return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData); + } + /* + This test was added to reproduce a bug where getAppropriateRounding was only ever using the first innerIntervals + passed in, instead of using the interval associated with the loop. + */ + public void testGetAppropriateRoundingUsesCorrectIntervals() { + RoundingInfo[] roundings = new RoundingInfo[6]; + DateTimeZone timeZone = DateTimeZone.UTC; + // Since we pass 0 as the starting index to getAppropriateRounding, we'll also use + // an innerInterval that is quite large, such that targetBuckets * roundings[i].getMaximumInnerInterval() + // will be larger than the estimate. + roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE, timeZone), + 1000L, 1000); + roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR, timeZone), + 60 * 1000L, 1, 5, 10, 30); + roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY, timeZone), + 60 * 60 * 1000L, 1, 3, 12); - return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData); + OffsetDateTime timestamp = Instant.parse("2018-01-01T00:00:01.000Z").atOffset(ZoneOffset.UTC); + // We want to pass a roundingIdx of zero, because in order to reproduce this bug, we need the function + // to increment the rounding (because the bug was that the function would not use the innerIntervals + // from the new rounding. 
+ int result = InternalAutoDateHistogram.getAppropriateRounding(timestamp.toEpochSecond()*1000, + timestamp.plusDays(1).toEpochSecond()*1000, 0, roundings, 25); + assertThat(result, equalTo(2)); } @Override protected void assertReduced(InternalAutoDateHistogram reduced, List inputs) { - int roundingIdx = 0; - for (InternalAutoDateHistogram histogram : inputs) { - if (histogram.getBucketInfo().roundingIdx > roundingIdx) { - roundingIdx = histogram.getBucketInfo().roundingIdx; - } - } - RoundingInfo roundingInfo = roundingInfos[roundingIdx]; long lowest = Long.MAX_VALUE; long highest = 0; + for (InternalAutoDateHistogram histogram : inputs) { for (Histogram.Bucket bucket : histogram.getBuckets()) { long bucketKey = ((DateTime) bucket.getKey()).getMillis(); @@ -100,35 +126,72 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List= 0; j--) { + int interval = roundingInfo.innerIntervals[j]; + if (normalizedDuration / interval < reduced.getBuckets().size()) { + innerIntervalToUse = interval; + innerIntervalIndex = j; + } + } + } + + long intervalInMillis = innerIntervalToUse * roundingInfo.getRoughEstimateDurationMillis(); + int bucketCount = getBucketCount(lowest, highest, roundingInfo, intervalInMillis); + + //Next, if our bucketCount is still above what we need, we'll go back and determine the interval + // based on a size calculation. + if (bucketCount > reduced.getBuckets().size()) { + for (int i = innerIntervalIndex; i < roundingInfo.innerIntervals.length; i++) { + long newIntervalMillis = roundingInfo.innerIntervals[i] * roundingInfo.getRoughEstimateDurationMillis(); + if (getBucketCount(lowest, highest, roundingInfo, newIntervalMillis) <= reduced.getBuckets().size()) { + innerIntervalToUse = roundingInfo.innerIntervals[i]; + intervalInMillis = innerIntervalToUse * roundingInfo.getRoughEstimateDurationMillis(); + } } } + Map expectedCounts = new TreeMap<>(); - long intervalInMillis = innerIntervalToUse*roundingInfo.getRoughEstimateDurationMillis(); for (long keyForBucket = roundingInfo.rounding.round(lowest); - keyForBucket <= highest; + keyForBucket <= roundingInfo.rounding.round(highest); keyForBucket = keyForBucket + intervalInMillis) { expectedCounts.put(keyForBucket, 0L); + // Iterate through the input buckets, and for each bucket, determine if it's inside + // the range of the bucket in the outer loop. if it is, add the doc count to the total + // for that bucket. + for (InternalAutoDateHistogram histogram : inputs) { for (Histogram.Bucket bucket : histogram.getBuckets()) { - long bucketKey = ((DateTime) bucket.getKey()).getMillis(); - long roundedBucketKey = roundingInfo.rounding.round(bucketKey); + long roundedBucketKey = roundingInfo.rounding.round(((DateTime) bucket.getKey()).getMillis()); + long docCount = bucket.getDocCount(); if (roundedBucketKey >= keyForBucket && roundedBucketKey < keyForBucket + intervalInMillis) { - long count = bucket.getDocCount(); expectedCounts.compute(keyForBucket, - (key, oldValue) -> (oldValue == null ? 0 : oldValue) + count); + (key, oldValue) -> (oldValue == null ? 0 : oldValue) + docCount); } } } } + // If there is only a single bucket, and we haven't added it above, add a bucket with no documents. + // this step is necessary because of the roundedBucketKey < keyForBucket + intervalInMillis above. 
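The testGetAppropriateRoundingUsesCorrectIntervals case above expects index 2 (hours) for a one-day range and a target of 25 buckets. A back-of-the-envelope check of that expectation, assuming, per the test's own comment, that a rounding level is accepted once the estimated bucket count fits within targetBuckets multiplied by that level's maximum inner interval; the numbers are derived from the RoundingInfo values in the test, not from the diff itself:

// range = one day; targetBuckets = 25, as in the test above
long rangeMillis = 24L * 60 * 60 * 1000;

// level 0 (seconds): 86_400 estimated buckets vs limit 25 * 1000 = 25_000 -> too many
// level 1 (minutes):  1_440 estimated buckets vs limit 25 * 30   =    750 -> too many
// level 2 (hours)  :     24 estimated buckets vs limit 25 * 12   =    300 -> fits, hence 2
long hourlyBuckets = rangeMillis / (60L * 60 * 1000); // 24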
+ if (roundingInfo.rounding.round(lowest) == roundingInfo.rounding.round(highest) && expectedCounts.isEmpty()) { + expectedCounts.put(roundingInfo.rounding.round(lowest), 0L); + } + + // pick out the actual reduced values to the make the assertion more readable Map actualCounts = new TreeMap<>(); for (Histogram.Bucket bucket : reduced.getBuckets()) { actualCounts.compute(((DateTime) bucket.getKey()).getMillis(), @@ -137,12 +200,16 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List instanceReader() { return InternalAutoDateHistogram::new; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index 13e1489795996..f62598fa7c317 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -67,6 +67,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; @@ -90,42 +91,63 @@ public static class CustomScriptPlugin extends MockScriptPlugin { protected Map, Object>> pluginScripts() { Map, Object>> scripts = new HashMap<>(); - scripts.put("_agg['count'] = 1", vars -> - aggScript(vars, agg -> ((Map) agg).put("count", 1))); + scripts.put("state['count'] = 1", vars -> + aggScript(vars, state -> state.put("count", 1))); - scripts.put("_agg.add(1)", vars -> - aggScript(vars, agg -> ((List) agg).add(1))); + scripts.put("state.list.add(1)", vars -> + aggScript(vars, state -> { + // Lazily populate state.list for tests without an init script + if (state.containsKey("list") == false) { + state.put("list", new ArrayList()); + } + + ((List) state.get("list")).add(1); + })); - scripts.put("_agg[param1] = param2", vars -> - aggScript(vars, agg -> ((Map) agg).put(XContentMapValues.extractValue("params.param1", vars), + scripts.put("state[param1] = param2", vars -> + aggScript(vars, state -> state.put((String) XContentMapValues.extractValue("params.param1", vars), XContentMapValues.extractValue("params.param2", vars)))); - scripts.put("vars.multiplier = 3", vars -> - ((Map) vars.get("vars")).put("multiplier", 3)); + scripts.put("vars.multiplier = 3", vars -> { + ((Map) vars.get("vars")).put("multiplier", 3); + + Map state = (Map) vars.get("state"); + state.put("list", new ArrayList()); - scripts.put("_agg.add(vars.multiplier)", vars -> - aggScript(vars, agg -> ((List) agg).add(XContentMapValues.extractValue("vars.multiplier", vars)))); + return state; + }); + + scripts.put("state.list.add(vars.multiplier)", vars -> + aggScript(vars, state -> { + // Lazily populate state.list for tests without an init script + if (state.containsKey("list") == false) { + state.put("list", new ArrayList()); + } + + ((List) state.get("list")).add(XContentMapValues.extractValue("vars.multiplier", vars)); + })); // Equivalent to: // // newaggregation = []; // sum = 0; // - // for (a in _agg) { - // sum += a + // for (s in state.list) { + // sum += s // }; // // newaggregation.add(sum); // return newaggregation" // - scripts.put("sum agg values as a new aggregation", vars -> { + scripts.put("sum state values as a new aggregation", vars -> { List 
newAggregation = new ArrayList(); - List agg = (List) vars.get("_agg"); + Map state = (Map) vars.get("state"); + List list = (List) state.get("list"); - if (agg != null) { + if (list != null) { Integer sum = 0; - for (Object a : (List) agg) { - sum += ((Number) a).intValue(); + for (Object s : list) { + sum += ((Number) s).intValue(); } newAggregation.add(sum); } @@ -137,24 +159,41 @@ protected Map, Object>> pluginScripts() { // newaggregation = []; // sum = 0; // - // for (aggregation in _aggs) { - // for (a in aggregation) { - // sum += a + // for (state in states) { + // for (s in state) { + // sum += s // } // }; // // newaggregation.add(sum); // return newaggregation" // - scripts.put("sum aggs of agg values as a new aggregation", vars -> { + scripts.put("sum all states (lists) values as a new aggregation", vars -> { + List newAggregation = new ArrayList(); + Integer sum = 0; + + List> states = (List>) vars.get("states"); + for (List list : states) { + if (list != null) { + for (Object s : list) { + sum += ((Number) s).intValue(); + } + } + } + newAggregation.add(sum); + return newAggregation; + }); + + scripts.put("sum all states' state.list values as a new aggregation", vars -> { List newAggregation = new ArrayList(); Integer sum = 0; - List aggs = (List) vars.get("_aggs"); - for (Object aggregation : (List) aggs) { - if (aggregation != null) { - for (Object a : (List) aggregation) { - sum += ((Number) a).intValue(); + List> states = (List>) vars.get("states"); + for (Map state : states) { + List list = (List) state.get("list"); + if (list != null) { + for (Object s : list) { + sum += ((Number) s).intValue(); } } } @@ -167,25 +206,25 @@ protected Map, Object>> pluginScripts() { // newaggregation = []; // sum = 0; // - // for (aggregation in _aggs) { - // for (a in aggregation) { - // sum += a + // for (state in states) { + // for (s in state) { + // sum += s // } // }; // // newaggregation.add(sum * multiplier); // return newaggregation" // - scripts.put("multiplied sum aggs of agg values as a new aggregation", vars -> { + scripts.put("multiplied sum all states (lists) values as a new aggregation", vars -> { Integer multiplier = (Integer) vars.get("multiplier"); List newAggregation = new ArrayList(); Integer sum = 0; - List aggs = (List) vars.get("_aggs"); - for (Object aggregation : (List) aggs) { - if (aggregation != null) { - for (Object a : (List) aggregation) { - sum += ((Number) a).intValue(); + List> states = (List>) vars.get("states"); + for (List list : states) { + if (list != null) { + for (Object s : list) { + sum += ((Number) s).intValue(); } } } @@ -193,53 +232,12 @@ protected Map, Object>> pluginScripts() { return newAggregation; }); - scripts.put("state.items = new ArrayList()", vars -> - aggContextScript(vars, state -> ((HashMap) state).put("items", new ArrayList()))); - - scripts.put("state.items.add(1)", vars -> - aggContextScript(vars, state -> { - HashMap stateMap = (HashMap) state; - List items = (List) stateMap.get("items"); - items.add(1); - })); - - scripts.put("sum context state values", vars -> { - int sum = 0; - HashMap state = (HashMap) vars.get("state"); - List items = (List) state.get("items"); - - for (Object x : items) { - sum += (Integer)x; - } - - return sum; - }); - - scripts.put("sum context states", vars -> { - Integer sum = 0; - - List states = (List) vars.get("states"); - for (Object state : states) { - sum += ((Number) state).intValue(); - } - - return sum; - }); - return scripts; } - static Object aggScript(Map vars, Consumer fn) { - return 
aggScript(vars, fn, "_agg"); - } - - static Object aggContextScript(Map vars, Consumer fn) { - return aggScript(vars, fn, "state"); - } - @SuppressWarnings("unchecked") - private static Object aggScript(Map vars, Consumer fn, String stateVarName) { - T aggState = (T) vars.get(stateVarName); + static Map aggScript(Map vars, Consumer> fn) { + Map aggState = (Map) vars.get("state"); fn.accept(aggState); return aggState; } @@ -285,17 +283,17 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().cluster().preparePutStoredScript() .setId("mapScript_stored") .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + - " \"source\": \"_agg.add(vars.multiplier)\"} }"), XContentType.JSON)); + " \"source\": \"state.list.add(vars.multiplier)\"} }"), XContentType.JSON)); assertAcked(client().admin().cluster().preparePutStoredScript() .setId("combineScript_stored") .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + - " \"source\": \"sum agg values as a new aggregation\"} }"), XContentType.JSON)); + " \"source\": \"sum state values as a new aggregation\"} }"), XContentType.JSON)); assertAcked(client().admin().cluster().preparePutStoredScript() .setId("reduceScript_stored") .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + - " \"source\": \"sum aggs of agg values as a new aggregation\"} }"), XContentType.JSON)); + " \"source\": \"sum all states (lists) values as a new aggregation\"} }"), XContentType.JSON)); indexRandom(true, builders); ensureSearchable(); @@ -315,9 +313,10 @@ public void setUp() throws Exception { // the name of the file script is used in test method while the source of the file script // must match a predefined script from CustomScriptPlugin.pluginScripts() method Files.write(scripts.resolve("init_script.mockscript"), "vars.multiplier = 3".getBytes("UTF-8")); - Files.write(scripts.resolve("map_script.mockscript"), "_agg.add(vars.multiplier)".getBytes("UTF-8")); - Files.write(scripts.resolve("combine_script.mockscript"), "sum agg values as a new aggregation".getBytes("UTF-8")); - Files.write(scripts.resolve("reduce_script.mockscript"), "sum aggs of agg values as a new aggregation".getBytes("UTF-8")); + Files.write(scripts.resolve("map_script.mockscript"), "state.list.add(vars.multiplier)".getBytes("UTF-8")); + Files.write(scripts.resolve("combine_script.mockscript"), "sum state values as a new aggregation".getBytes("UTF-8")); + Files.write(scripts.resolve("reduce_script.mockscript"), + "sum all states (lists) values as a new aggregation".getBytes("UTF-8")); } catch (IOException e) { throw new RuntimeException("failed to create scripts"); } @@ -329,7 +328,7 @@ protected Path nodeConfigPath(int nodeOrdinal) { } public void testMap() { - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg['count'] = 1", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state['count'] = 1", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -365,52 +364,12 @@ public void testMap() { assertThat(numShardsRun, greaterThan(0)); } - public void testExplicitAggParam() { - Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); - - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", Collections.emptyMap()); - - SearchResponse response = client().prepareSearch("idx") - 
.setQuery(matchAllQuery()) - .addAggregation(scriptedMetric("scripted").params(params).mapScript(mapScript)) - .get(); - assertSearchResponse(response); - assertThat(response.getHits().getTotalHits(), equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); - long totalCount = 0; - for (Object object : aggregationList) { - assertThat(object, notNullValue()); - assertThat(object, instanceOf(List.class)); - List list = (List) object; - for (Object o : list) { - assertThat(o, notNullValue()); - assertThat(o, instanceOf(Number.class)); - Number numberValue = (Number) o; - assertThat(numberValue, equalTo((Number) 1)); - totalCount += numberValue.longValue(); - } - } - assertThat(totalCount, equalTo(numDocs)); - } - - public void testMapWithParamsAndImplicitAggMap() { + public void testMapWithParams() { // Split the params up between the script and the aggregation. - // Don't put any _agg map in params. Map scriptParams = Collections.singletonMap("param1", "12"); Map aggregationParams = Collections.singletonMap("param2", 1); - // The _agg hashmap will be available even if not declared in the params map - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg[param1] = param2", scriptParams); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state[param1] = param2", scriptParams); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -454,7 +413,6 @@ public void testInitMapWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); SearchResponse response = client() @@ -466,7 +424,7 @@ public void testInitMapWithParams() { .initScript( new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap())) .mapScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "_agg.add(vars.multiplier)", Collections.emptyMap()))) + "state.list.add(vars.multiplier)", Collections.emptyMap()))) .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits(), equalTo(numDocs)); @@ -483,8 +441,11 @@ public void testInitMapWithParams() { long totalCount = 0; for (Object object : aggregationList) { assertThat(object, notNullValue()); - assertThat(object, instanceOf(List.class)); - List list = (List) object; + assertThat(object, instanceOf(HashMap.class)); + Map map = (Map) object; + assertThat(map, hasKey("list")); + assertThat(map.get("list"), instanceOf(List.class)); + List list = (List) map.get("list"); for (Object o : list) { assertThat(o, notNullValue()); assertThat(o, instanceOf(Number.class)); @@ -501,12 +462,11 @@ public void testMapCombineWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", Collections.emptyMap()); + 
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(1)", Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -553,13 +513,13 @@ public void testInitMapCombineWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -607,15 +567,15 @@ public void testInitMapCombineReduceWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -652,15 +612,15 @@ public void testInitMapCombineReduceGetProperty() throws Exception { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, 
"sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse searchResponse = client() .prepareSearch("idx") @@ -707,14 +667,14 @@ public void testMapCombineReduceWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -749,13 +709,13 @@ public void testInitMapReduceWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states' state.list values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -789,12 +749,12 @@ public void testMapReduceWithParams() { Map varsMap = new HashMap<>(); varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states' state.list values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -828,18 +788,18 @@ public void testInitMapCombineReduceWithParamsAndReduceParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Map reduceParams = new HashMap<>(); reduceParams.put("multiplier", 4); Script initScript = new 
Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "multiplied sum aggs of agg values as a new aggregation", reduceParams); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "multiplied sum all states (lists) values as a new aggregation", reduceParams); SearchResponse response = client() .prepareSearch("idx") @@ -875,7 +835,6 @@ public void testInitMapCombineReduceWithParamsStored() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); SearchResponse response = client() @@ -916,15 +875,15 @@ public void testInitMapCombineReduceWithParamsAsSubAgg() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -977,15 +936,15 @@ public void testEmptyAggregation() throws Exception { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new 
Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) @@ -1021,7 +980,7 @@ public void testEmptyAggregation() throws Exception { * not using a script does get cached. */ public void testDontCacheScripts() throws Exception { - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg['count'] = 1", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state['count'] = 1", Collections.emptyMap()); assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) .get()); @@ -1047,7 +1006,7 @@ public void testDontCacheScripts() throws Exception { public void testConflictingAggAndScriptParams() { Map params = Collections.singletonMap("param1", "12"); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", params); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(1)", params); SearchRequestBuilder builder = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -1056,37 +1015,4 @@ public void testConflictingAggAndScriptParams() { SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, builder::get); assertThat(ex.getCause().getMessage(), containsString("Parameter name \"param1\" used in both aggregation and script parameters")); } - - public void testAggFromContext() { - Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.items = new ArrayList()", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.items.add(1)", Collections.emptyMap()); - Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum context state values", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum context states", - Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted") - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript)) - .get(); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(Integer.class)); - Integer aggResult = (Integer) scriptedMetricAggregation.aggregation(); - long totalAgg = aggResult.longValue(); - assertThat(totalAgg, equalTo(numDocs)); - } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 952eb22848e1a..a74734c622f8d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -321,7 +321,7 @@ public void 
testIssue11119() throws Exception { assertThat(response.getHits().getTotalHits(), equalTo(8L)); assertThat(response.getHits().getHits().length, equalTo(0)); - assertThat(response.getHits().getMaxScore(), equalTo(0f)); + assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -356,7 +356,7 @@ public void testIssue11119() throws Exception { assertThat(response.getHits().getTotalHits(), equalTo(8L)); assertThat(response.getHits().getHits().length, equalTo(0)); - assertThat(response.getHits().getMaxScore(), equalTo(0f)); + assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java index 584208af4177c..70ddacf5698b2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java @@ -107,7 +107,7 @@ private static Object randomValue(Supplier[] valueTypes, int level) { /** * Mock of the script service. The script that is run looks at the - * "_aggs" parameter visible when executing the script and simply returns the count. + * "states" context variable visible when executing the script and simply returns the count. * This should be equal to the number of input InternalScriptedMetrics that are reduced * in total. 
*/ @@ -116,7 +116,7 @@ protected ScriptService mockScriptService() { // mock script always retuns the size of the input aggs list as result @SuppressWarnings("unchecked") MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, - Collections.singletonMap(REDUCE_SCRIPT_NAME, script -> ((List) script.get("_aggs")).size())); + Collections.singletonMap(REDUCE_SCRIPT_NAME, script -> ((List) script.get("states")).size())); Map engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java index 5124503fc0369..65e42556461a5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java @@ -83,72 +83,72 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase { @SuppressWarnings("unchecked") public static void initMockScripts() { SCRIPTS.put("initScript", params -> { - Map agg = (Map) params.get("_agg"); - agg.put("collector", new ArrayList()); - return agg; - }); + Map state = (Map) params.get("state"); + state.put("collector", new ArrayList()); + return state; + }); SCRIPTS.put("mapScript", params -> { - Map agg = (Map) params.get("_agg"); - ((List) agg.get("collector")).add(1); // just add 1 for each doc the script is run on - return agg; + Map state = (Map) params.get("state"); + ((List) state.get("collector")).add(1); // just add 1 for each doc the script is run on + return state; }); SCRIPTS.put("combineScript", params -> { - Map agg = (Map) params.get("_agg"); - return ((List) agg.get("collector")).stream().mapToInt(Integer::intValue).sum(); + Map state = (Map) params.get("state"); + return ((List) state.get("collector")).stream().mapToInt(Integer::intValue).sum(); }); SCRIPTS.put("initScriptScore", params -> { - Map agg = (Map) params.get("_agg"); - agg.put("collector", new ArrayList()); - return agg; - }); + Map state = (Map) params.get("state"); + state.put("collector", new ArrayList()); + return state; + }); SCRIPTS.put("mapScriptScore", params -> { - Map agg = (Map) params.get("_agg"); - ((List) agg.get("collector")).add(((Number) params.get("_score")).doubleValue()); - return agg; + Map state = (Map) params.get("state"); + ((List) state.get("collector")).add(((Number) params.get("_score")).doubleValue()); + return state; }); SCRIPTS.put("combineScriptScore", params -> { - Map agg = (Map) params.get("_agg"); - return ((List) agg.get("collector")).stream().mapToDouble(Double::doubleValue).sum(); + Map state = (Map) params.get("state"); + return ((List) state.get("collector")).stream().mapToDouble(Double::doubleValue).sum(); }); SCRIPTS.put("initScriptParams", params -> { - Map agg = (Map) params.get("_agg"); + Map state = (Map) params.get("state"); Integer initialValue = (Integer)params.get("initialValue"); ArrayList collector = new ArrayList<>(); collector.add(initialValue); - agg.put("collector", collector); - return agg; + state.put("collector", collector); + return state; }); SCRIPTS.put("mapScriptParams", params -> { - Map agg = (Map) params.get("_agg"); + Map state = (Map) params.get("state"); Integer itemValue = (Integer) params.get("itemValue"); - 
((List) agg.get("collector")).add(itemValue); - return agg; + ((List) state.get("collector")).add(itemValue); + return state; }); SCRIPTS.put("combineScriptParams", params -> { - Map agg = (Map) params.get("_agg"); + Map state = (Map) params.get("state"); int divisor = ((Integer) params.get("divisor")); - return ((List) agg.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i / divisor).sum(); + return ((List) state.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i / divisor).sum(); }); SCRIPTS.put("initScriptSelfRef", params -> { - Map agg = (Map) params.get("_agg"); - agg.put("collector", new ArrayList()); - agg.put("selfRef", agg); - return agg; + Map state = (Map) params.get("state"); + state.put("collector", new ArrayList()); + state.put("selfRef", state); + return state; }); SCRIPTS.put("mapScriptSelfRef", params -> { - Map agg = (Map) params.get("_agg"); - agg.put("selfRef", agg); - return agg; + Map state = (Map) params.get("state"); + state.put("selfRef", state); + return state; }); SCRIPTS.put("combineScriptSelfRef", params -> { - Map agg = (Map) params.get("_agg"); - agg.put("selfRef", agg); - return agg; + Map state = (Map) params.get("state"); + state.put("selfRef", state); + return state; }); } @@ -170,7 +170,7 @@ public void testNoDocs() throws IOException { } /** - * without combine script, the "_aggs" map should contain a list of the size of the number of documents matched + * without combine script, the "states" map should contain a list of the size of the number of documents matched */ public void testScriptedMetricWithoutCombine() throws IOException { try (Directory directory = newDirectory()) { diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java index 5cc4e2ddc68a7..7790e8d6576ca 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java @@ -34,6 +34,7 @@ import java.io.IOException; import java.util.Collections; +import java.util.Map; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -78,6 +79,29 @@ public void testMultipleFiltering() throws IOException { assertEquals(Collections.singletonMap("field","value"), hitContext.hit().getSourceAsMap()); } + public void testNestedSource() throws IOException { + Map expectedNested = Collections.singletonMap("nested2", Collections.singletonMap("field", "value0")); + XContentBuilder source = XContentFactory.jsonBuilder().startObject() + .field("field", "value") + .field("field2", "value2") + .field("nested1", expectedNested) + .endObject(); + FetchSubPhase.HitContext hitContext = hitExecuteMultiple(source, true, null, null, + new SearchHit.NestedIdentity("nested1", 0,null)); + assertEquals(expectedNested, hitContext.hit().getSourceAsMap()); + hitContext = hitExecuteMultiple(source, true, new String[]{"invalid"}, null, + new SearchHit.NestedIdentity("nested1", 0,null)); + assertEquals(Collections.emptyMap(), hitContext.hit().getSourceAsMap()); + + hitContext = hitExecuteMultiple(source, true, null, null, + new SearchHit.NestedIdentity("nested1", 0, new SearchHit.NestedIdentity("nested2", 0, null))); + assertEquals(Collections.singletonMap("field", "value0"), hitContext.hit().getSourceAsMap()); + + hitContext = hitExecuteMultiple(source, true, new String[]{"invalid"}, null, + new 
SearchHit.NestedIdentity("nested1", 0, new SearchHit.NestedIdentity("nested2", 0, null))); + assertEquals(Collections.emptyMap(), hitContext.hit().getSourceAsMap()); + } + public void testSourceDisabled() throws IOException { FetchSubPhase.HitContext hitContext = hitExecute(null, true, null, null); assertNull(hitContext.hit().getSourceAsMap()); @@ -96,17 +120,29 @@ public void testSourceDisabled() throws IOException { } private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude) { + return hitExecute(source, fetchSource, include, exclude, null); + } + + + private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude, + SearchHit.NestedIdentity nestedIdentity) { return hitExecuteMultiple(source, fetchSource, include == null ? Strings.EMPTY_ARRAY : new String[]{include}, - exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude}); + exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude}, nestedIdentity); } private FetchSubPhase.HitContext hitExecuteMultiple(XContentBuilder source, boolean fetchSource, String[] includes, String[] excludes) { + return hitExecuteMultiple(source, fetchSource, includes, excludes, null); + } + + private FetchSubPhase.HitContext hitExecuteMultiple(XContentBuilder source, boolean fetchSource, String[] includes, String[] excludes, + SearchHit.NestedIdentity nestedIdentity) { FetchSourceContext fetchSourceContext = new FetchSourceContext(fetchSource, includes, excludes); SearchContext searchContext = new FetchSourceSubPhaseTestSearchContext(fetchSourceContext, source == null ? null : BytesReference.bytes(source)); FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext(); - hitContext.reset(new SearchHit(1, null, null, null), null, 1, null); + final SearchHit searchHit = new SearchHit(1, null, null, nestedIdentity, null); + hitContext.reset(searchHit, null, 1, null); FetchSourceSubPhase phase = new FetchSourceSubPhase(); phase.hitExecute(searchContext, hitContext); return hitContext; diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java index 37359d9f20d71..50f2261c722a1 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java @@ -601,10 +601,10 @@ private static void setRandomCommonOptions(AbstractHighlighterBuilder highlightB value = randomAlphaOfLengthBetween(1, 10); break; case 1: - value = new Integer(randomInt(1000)); + value = Integer.valueOf(randomInt(1000)); break; case 2: - value = new Boolean(randomBoolean()); + value = Boolean.valueOf(randomBoolean()); break; } options.put(randomAlphaOfLengthBetween(1, 10), value); diff --git a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index aea0243a399d4..45b6340ba6f46 100644 --- a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -810,6 +810,32 @@ public void testDocValueFields() throws Exception { equalTo(new BytesRef(new byte[] {42, 100}))); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + builder = 
client().prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("*field"); + searchResponse = builder.execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(1)); + fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", + "float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field", + "binary_field", "ip_field"))); + + assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValue(), equalTo((Object) 3L)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo((Object) 4L)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo((Object) 5.0)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); + dateField = searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(); + assertThat(dateField.toInstant().toEpochMilli(), equalTo(date.toInstant().toEpochMilli())); + assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), + equalTo(new BytesRef(new byte[] {42, 100}))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + builder = client().prepareSearch().setQuery(matchAllQuery()) .addDocValueField("text_field", "use_field_mapping") .addDocValueField("keyword_field", "use_field_mapping") @@ -977,6 +1003,70 @@ public void testDocValueFieldsWithFieldAlias() throws Exception { assertThat(fetchedDate, equalTo(date)); } + public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { + XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .field("enabled", false) + .endObject() + .startObject("properties") + .startObject("text_field") + .field("type", "text") + .field("fielddata", true) + .endObject() + .startObject("date_field") + .field("type", "date") + .field("format", "yyyy-MM-dd") + .endObject() + .startObject("text_field_alias") + .field("type", "alias") + .field("path", "text_field") + .endObject() + .startObject("date_field_alias") + .field("type", "alias") + .field("path", "date_field") + .endObject() + .endObject() + .endObject() + .endObject(); + assertAcked(prepareCreate("test").addMapping("type", mapping)); + ensureGreen("test"); + + ZonedDateTime date = ZonedDateTime.of(1990, 12, 29, 0, 0, 0, 0, ZoneOffset.UTC); + DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd", Locale.ROOT); + + index("test", "type", "1", "text_field", "foo", "date_field", formatter.format(date)); + refresh("test"); + + SearchRequestBuilder builder = client().prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("*alias", 
"use_field_mapping") + .addDocValueField("date_field"); + SearchResponse searchResponse = builder.execute().actionGet(); + + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 1); + SearchHit hit = searchResponse.getHits().getAt(0); + + Map fields = hit.getFields(); + assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); + + DocumentField textFieldAlias = fields.get("text_field_alias"); + assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); + assertThat(textFieldAlias.getValue(), equalTo("foo")); + + DocumentField dateFieldAlias = fields.get("date_field_alias"); + assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); + assertThat(dateFieldAlias.getValue(), + equalTo("1990-12-29")); + + DocumentField dateField = fields.get("date_field"); + assertThat(dateField.getName(), equalTo("date_field")); + + ZonedDateTime fetchedDate = dateField.getValue(); + assertThat(fetchedDate, equalTo(date)); + } + public void testStoredFieldsWithFieldAlias() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder() diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index a21893db3920f..0a860a636d4aa 100644 --- a/server/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -613,7 +613,7 @@ public void testDateWithoutOrigin() throws Exception { } public void testManyDocsLin() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = jsonBuilder().startObject().startObject("type").startObject("properties") .startObject("test").field("type", "text").endObject().startObject("date").field("type", "date") diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java index 12a64d80a1489..80b40042801b5 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java @@ -51,7 +51,7 @@ protected Collection> nodePlugins() { } public void testSimpleBoundingBoxTest() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") @@ -123,7 +123,7 @@ public void testSimpleBoundingBoxTest() throws Exception { } public void testLimit2BoundingBox() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") @@ -176,7 +176,7 @@ 
public void testLimit2BoundingBox() throws Exception { } public void testCompleteLonRange() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java index 5966ea6a49dcc..143fd611c3f5e 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java @@ -101,7 +101,7 @@ static Double distanceScript(Map vars, Function> nodePlugins() { @Override protected void setupSuiteScopeCluster() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index 16365d829a83b..872267417c37d 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -67,6 +67,7 @@ import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; @@ -103,6 +104,7 @@ private void countTestCase(Query query, IndexReader reader, boolean shouldCollec final boolean rescore = QueryPhase.execute(context, searcher, checkCancelled -> {}); assertFalse(rescore); assertEquals(searcher.count(query), context.queryResult().topDocs().totalHits); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); } private void countTestCase(boolean withDeletions) throws Exception { @@ -172,11 +174,14 @@ public void testPostFilterDisablesCountOptimization() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertEquals(1, context.queryResult().topDocs().totalHits); + assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); contextSearcher = new IndexSearcher(reader); context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery())); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertEquals(0, context.queryResult().topDocs().totalHits); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); reader.close(); dir.close(); } @@ -205,13 +210,13 @@ public void testTerminateAfterWithFilter() throws Exception { context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", Integer.toString(i))))); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertEquals(1, context.queryResult().topDocs().totalHits); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); } reader.close(); dir.close(); } - 
public void testMinScoreDisablesCountOptimization() throws Exception { Directory dir = newDirectory(); final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); @@ -230,11 +235,13 @@ public void testMinScoreDisablesCountOptimization() throws Exception { context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertEquals(1, context.queryResult().topDocs().totalHits); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); contextSearcher = new IndexSearcher(reader); context.minimumScore(100); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertEquals(0, context.queryResult().topDocs().totalHits); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); reader.close(); dir.close(); } @@ -289,6 +296,7 @@ public void testInOrderScrollOptimization() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); assertNull(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(0)); assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs)); @@ -296,9 +304,11 @@ public void testInOrderScrollOptimization() throws Exception { contextSearcher = getAssertingEarlyTerminationSearcher(reader, size); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(size)); assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); assertThat(context.queryResult().topDocs().scoreDocs[0].doc, greaterThanOrEqualTo(size)); reader.close(); dir.close(); @@ -334,12 +344,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); context.setSize(0); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); } @@ -348,6 +360,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); } { @@ -360,6 +373,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); 
assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); + assertThat(context.queryResult().topDocs().getMaxScore(), greaterThan(0f)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); context.setSize(0); @@ -367,6 +381,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); } { @@ -376,6 +391,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); + assertThat(context.queryResult().topDocs().getMaxScore(), greaterThan(0f)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); assertThat(collector.getTotalHits(), equalTo(1)); context.queryCollectors().clear(); @@ -387,6 +403,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); assertThat(collector.getTotalHits(), equalTo(1)); } @@ -539,19 +556,19 @@ public void testIndexSortScrollOptimization() throws Exception { dir.close(); } - static IndexSearcher getAssertingEarlyTerminationSearcher(IndexReader reader, int size) { + private static IndexSearcher getAssertingEarlyTerminationSearcher(IndexReader reader, int size) { return new IndexSearcher(reader) { protected void search(List leaves, Weight weight, Collector collector) throws IOException { - final Collector in = new AssertingEalyTerminationFilterCollector(collector, size); + final Collector in = new AssertingEarlyTerminationFilterCollector(collector, size); super.search(leaves, weight, in); } }; } - private static class AssertingEalyTerminationFilterCollector extends FilterCollector { + private static class AssertingEarlyTerminationFilterCollector extends FilterCollector { private final int size; - AssertingEalyTerminationFilterCollector(Collector in, int size) { + AssertingEarlyTerminationFilterCollector(Collector in, int size) { super(in); this.size = size; } diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryStringIT.java b/server/src/test/java/org/elasticsearch/search/query/QueryStringIT.java index 5caab8c9dfec6..a90e98a38eefc 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryStringIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryStringIT.java @@ -430,8 +430,8 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception { indexRequests.add(client().prepareIndex("test", "_doc", "1").setSource("f3", "text", "f2", "one")); indexRandom(true, false, indexRequests); - // The wildcard field matches aliases for both a text and boolean field. - // By default, the boolean field should be ignored when building the query. + // The wildcard field matches aliases for both a text and geo_point field. 
+ // By default, the geo_point field should be ignored when building the query. SearchResponse response = client().prepareSearch("test") .setQuery(queryStringQuery("text").field("f*_alias")) .execute().actionGet(); diff --git a/server/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java b/server/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java index 31fcfa7155cc0..4005f1218a92f 100644 --- a/server/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java +++ b/server/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java @@ -21,6 +21,7 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; @@ -198,6 +199,8 @@ private int createIndex(boolean singleShard) throws Exception { } // no replicas, as they might be ordered differently settings.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0); + // we need to control refreshes as they might take different merges into account + settings.put("index.refresh_interval", -1); assertAcked(prepareCreate("test").setSettings(settings.build()).get()); final int numDocs = randomIntBetween(10, 200); diff --git a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java index 965dcb3e8ccf1..e134b20c309f4 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java +++ b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java @@ -59,7 +59,7 @@ protected Collection> nodePlugins() { } public void testDistanceSortingMVFields() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") @@ -189,7 +189,7 @@ public void testDistanceSortingMVFields() throws Exception { // Regression bug: // https://github.com/elastic/elasticsearch/issues/2851 public void testDistanceSortingWithMissingGeoPoint() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") @@ -234,7 +234,7 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception { } public void testDistanceSortingNestedFields() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("company").startObject("properties") @@ -383,7 +383,7 @@ public void testDistanceSortingNestedFields() throws Exception { * Issue 3073 */ public void testGeoDistanceFilter() throws IOException { - Version version 
= VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); double lat = 40.720611; diff --git a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index 200043a6668ab..cac5fede848a4 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -70,7 +70,7 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce * 1 2 3 4 5 6 7 */ Version version = randomBoolean() ? Version.CURRENT - : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); + : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -136,7 +136,7 @@ public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedExc * d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5 */ Version version = randomBoolean() ? Version.CURRENT - : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); + : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -197,7 +197,7 @@ public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionExcept * 1 2 3 4 5 6 */ Version version = randomBoolean() ? 
Version.CURRENT - : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); + : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); diff --git a/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java index d95db778a6a3f..44c49ace5de8f 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -493,15 +493,24 @@ public void testGeoNeighbours() throws Exception { } public void testGeoField() throws Exception { -// Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5); -// Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder mapping = jsonBuilder(); mapping.startObject(); mapping.startObject(TYPE); mapping.startObject("properties"); + mapping.startObject("location"); + mapping.startObject("properties"); mapping.startObject("pin"); mapping.field("type", "geo_point"); + // Enable store and disable indexing sometimes + if (randomBoolean()) { + mapping.field("store", "true"); + } + if (randomBoolean()) { + mapping.field("index", "false"); + } + mapping.endObject(); // pin mapping.endObject(); + mapping.endObject(); // location mapping.startObject(FIELD); mapping.field("type", "completion"); mapping.field("analyzer", "simple"); @@ -510,7 +519,7 @@ public void testGeoField() throws Exception { mapping.startObject(); mapping.field("name", "st"); mapping.field("type", "geo"); - mapping.field("path", "pin"); + mapping.field("path", "location.pin"); mapping.field("precision", 5); mapping.endObject(); mapping.endArray(); @@ -524,7 +533,9 @@ public void testGeoField() throws Exception { XContentBuilder source1 = jsonBuilder() .startObject() + .startObject("location") .latlon("pin", 52.529172, 13.407333) + .endObject() .startObject(FIELD) .array("input", "Hotel Amsterdam in Berlin") .endObject() @@ -533,7 +544,9 @@ public void testGeoField() throws Exception { XContentBuilder source2 = jsonBuilder() .startObject() + .startObject("location") .latlon("pin", 52.363389, 4.888695) + .endObject() .startObject(FIELD) .array("input", "Hotel Berlin in Amsterdam") .endObject() @@ -600,6 +613,7 @@ public void assertSuggestions(String suggestionName, SuggestionBuilder suggestBu private void createIndexAndMapping(CompletionMappingBuilder completionMappingBuilder) throws IOException { createIndexAndMappingAndSettings(Settings.EMPTY, completionMappingBuilder); } + private void createIndexAndMappingAndSettings(Settings settings, CompletionMappingBuilder completionMappingBuilder) throws IOException { XContentBuilder mapping = jsonBuilder().startObject() .startObject(TYPE).startObject("properties") diff --git a/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java b/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java index 56ff157ec718d..a745384eb3edb 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.suggest.completion; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -200,6 +201,70 @@ public void testIndexingWithMultipleContexts() throws Exception { assertContextSuggestFields(fields, 3); } + public void testMalformedGeoField() throws Exception { + XContentBuilder mapping = jsonBuilder(); + mapping.startObject(); + mapping.startObject("type1"); + mapping.startObject("properties"); + mapping.startObject("pin"); + String type = randomFrom("text", "keyword", "long"); + mapping.field("type", type); + mapping.endObject(); + mapping.startObject("suggestion"); + mapping.field("type", "completion"); + mapping.field("analyzer", "simple"); + + mapping.startArray("contexts"); + mapping.startObject(); + mapping.field("name", "st"); + mapping.field("type", "geo"); + mapping.field("path", "pin"); + mapping.field("precision", 5); + mapping.endObject(); + mapping.endArray(); + + mapping.endObject(); + + mapping.endObject(); + mapping.endObject(); + mapping.endObject(); + + ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class, + () -> createIndex("test", Settings.EMPTY, "type1", mapping)); + + assertThat(ex.getMessage(), equalTo("field [pin] referenced in context [st] must be mapped to geo_point, found [" + type + "]")); + } + + public void testMissingGeoField() throws Exception { + XContentBuilder mapping = jsonBuilder(); + mapping.startObject(); + mapping.startObject("type1"); + mapping.startObject("properties"); + mapping.startObject("suggestion"); + mapping.field("type", "completion"); + mapping.field("analyzer", "simple"); + + mapping.startArray("contexts"); + mapping.startObject(); + mapping.field("name", "st"); + mapping.field("type", "geo"); + mapping.field("path", "pin"); + mapping.field("precision", 5); + mapping.endObject(); + mapping.endArray(); + + mapping.endObject(); + + mapping.endObject(); + mapping.endObject(); + mapping.endObject(); + + ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class, + () -> createIndex("test", Settings.EMPTY, "type1", mapping)); + + assertThat(ex.getMessage(), equalTo("field [pin] referenced in context [st] is not defined in the mapping")); + } + public void testParsingQueryContextBasic() throws Exception { XContentBuilder builder = jsonBuilder().value("ezs42e44yx96"); XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); diff --git a/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java index 3c099c32bde2d..bff5a2b122d2f 100644 --- a/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java @@ -159,7 +159,7 @@ public void onNodeDisconnected(DiscoveryNode node) { assertFalse(connection.isClosed()); assertTrue(connectionManager.nodeConnected(node)); assertSame(connection, connectionManager.getConnection(node)); - assertEquals(1, connectionManager.connectedNodeCount()); + assertEquals(1, connectionManager.size()); assertEquals(1, nodeConnectedCount.get()); assertEquals(0, nodeDisconnectedCount.get()); @@ 
-169,7 +169,7 @@ public void onNodeDisconnected(DiscoveryNode node) { connection.close(); } assertTrue(connection.isClosed()); - assertEquals(0, connectionManager.connectedNodeCount()); + assertEquals(0, connectionManager.size()); assertEquals(1, nodeConnectedCount.get()); assertEquals(1, nodeDisconnectedCount.get()); } @@ -205,7 +205,7 @@ public void onNodeDisconnected(DiscoveryNode node) { assertTrue(connection.isClosed()); assertFalse(connectionManager.nodeConnected(node)); expectThrows(NodeNotConnectedException.class, () -> connectionManager.getConnection(node)); - assertEquals(0, connectionManager.connectedNodeCount()); + assertEquals(0, connectionManager.size()); assertEquals(0, nodeConnectedCount.get()); assertEquals(0, nodeDisconnectedCount.get()); } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java index 8cfec0a07f910..34e22fd20de7f 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -81,13 +81,15 @@ public void testEnsureWeReconnect() throws Exception { try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { Semaphore semaphore = new Semaphore(1); service.start(); - service.addConnectionListener(new TransportConnectionListener() { - @Override - public void onNodeDisconnected(DiscoveryNode node) { - if (remoteNode.equals(node)) { - semaphore.release(); + service.getRemoteClusterService().getConnections().forEach(con -> { + con.getConnectionManager().addListener(new TransportConnectionListener() { + @Override + public void onNodeDisconnected(DiscoveryNode node) { + if (remoteNode.equals(node)) { + semaphore.release(); + } } - } + }); }); // this test is not perfect since we might reconnect concurrently but it will fail most of the time if we don't have // the right calls in place in the RemoteAwareClient @@ -95,7 +97,9 @@ public void onNodeDisconnected(DiscoveryNode node) { for (int i = 0; i < 10; i++) { semaphore.acquire(); try { - service.disconnectFromNode(remoteNode); + service.getRemoteClusterService().getConnections().forEach(con -> { + con.getConnectionManager().disconnectFromNode(remoteNode); + }); semaphore.acquire(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); Client client = remoteClusterService.getRemoteClusterClient(threadPool, "test"); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 8da4064d1c89a..88b01c66898a0 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -18,6 +18,9 @@ */ package org.elasticsearch.transport; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -51,6 +54,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.threadpool.TestThreadPool; import 
org.elasticsearch.threadpool.ThreadPool; @@ -144,7 +148,7 @@ public static MockTransportService startTransport( } } - public void testLocalProfileIsUsedForLocalCluster() throws Exception { + public void testRemoteProfileIsUsedForLocalCluster() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { @@ -158,8 +162,8 @@ public void testLocalProfileIsUsedForLocalCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); @@ -174,9 +178,12 @@ public ClusterSearchShardsResponse read(StreamInput in) throws IOException { }); TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.BULK) .build(); - service.sendRequest(connection.getConnection(), ClusterSearchShardsAction.NAME, new ClusterSearchShardsRequest(), - options, futureHandler); - futureHandler.txGet(); + IllegalStateException ise = (IllegalStateException) expectThrows(SendRequestTransportException.class, () -> { + service.sendRequest(discoverableNode, + ClusterSearchShardsAction.NAME, new ClusterSearchShardsRequest(), options, futureHandler); + futureHandler.txGet(); + }).getCause(); + assertEquals(ise.getMessage(), "can't select channel size is 0 for types: [RECOVERY, BULK, STATE]"); } } } @@ -198,8 +205,8 @@ public void testRemoteProfileIsUsedForRemoteCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); @@ -254,8 +261,8 @@ public void testDiscoverSingleNode() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); @@ -276,14 +283,14 @@ public void testDiscoverSingleNodeWithIncompatibleSeed() throws Exception { knownNodes.add(discoverableTransport.getLocalDiscoNode()); 
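Editor's aside on the RemoteClusterConnectionTests hunks above: the patch replaces concrete seed-node lists with lists of Supplier<DiscoveryNode> (and passes the service's ConnectionManager explicitly), so a seed's address can be re-resolved on every connection attempt. The sketch below only illustrates that supplier pattern under simplified assumptions; SeedNode, resolve and lazySeed are hypothetical stand-ins, not Elasticsearch API.

```java
import java.util.Arrays;
import java.util.List;
import java.util.function.Supplier;

// Illustrative sketch only (not part of the patch): shows why seeds are modelled as suppliers --
// the address is looked up each time get() is invoked, e.g. on every reconnect attempt,
// instead of once at configuration time.
public final class LazySeedNodeSketch {

    // Stand-in for org.elasticsearch.cluster.node.DiscoveryNode.
    static final class SeedNode {
        final String resolvedAddress;
        SeedNode(String resolvedAddress) { this.resolvedAddress = resolvedAddress; }
    }

    // Placeholder resolution step; a real implementation would perform a DNS lookup here.
    static String resolve(String configuredAddress) {
        return configuredAddress;
    }

    static Supplier<SeedNode> lazySeed(String configuredAddress) {
        return () -> new SeedNode(resolve(configuredAddress));
    }

    public static void main(String[] args) {
        List<Supplier<SeedNode>> seeds = Arrays.asList(lazySeed("seed-1:9300"), lazySeed("seed-2:9300"));
        // Each retry re-invokes the supplier, so DNS changes between attempts are picked up.
        for (Supplier<SeedNode> seed : seeds) {
            System.out.println("connecting to " + seed.get().resolvedAddress);
        }
    }
}
```

The same idea is why the tests now build seed lists as Arrays.asList(() -> seedNode) rather than Arrays.asList(seedNode).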
knownNodes.add(incompatibleTransport.getLocalDiscoNode()); Collections.shuffle(knownNodes, random()); - List seedNodes = Arrays.asList(incompatibleSeedNode, seedNode); + List> seedNodes = Arrays.asList(() -> incompatibleSeedNode, () -> seedNode); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true)) { + seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, seedNodes); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -310,8 +317,8 @@ public void testNodeDisconnected() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertFalse(service.nodeConnected(spareNode)); @@ -359,8 +366,9 @@ public void testFilterDiscoveredNodes() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, + n -> n.equals(rejectedNode) == false)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); if (rejectedNode.equals(seedNode)) { assertFalse(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -373,15 +381,19 @@ public void testFilterDiscoveredNodes() throws Exception { } } } + private void updateSeedNodes(RemoteClusterConnection connection, List> seedNodes) throws Exception { + updateSeedNodes(connection, seedNodes, null); + } - private void updateSeedNodes(RemoteClusterConnection connection, List seedNodes) throws Exception { + private void updateSeedNodes(RemoteClusterConnection connection, List> seedNodes, String proxyAddress) + throws Exception { CountDownLatch latch = new CountDownLatch(1); AtomicReference exceptionAtomicReference = new AtomicReference<>(); ActionListener listener = ActionListener.wrap(x -> latch.countDown(), x -> { exceptionAtomicReference.set(x); latch.countDown(); }); - connection.updateSeedNodes(seedNodes, listener); + connection.updateSeedNodes(proxyAddress, seedNodes, listener); latch.await(); if (exceptionAtomicReference.get() != null) { throw exceptionAtomicReference.get(); @@ -398,8 +410,8 @@ public void testConnectWithIncompatibleTransports() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - expectThrows(Exception.class, () -> updateSeedNodes(connection, 
Arrays.asList(seedNode))); + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + expectThrows(Exception.class, () -> updateSeedNodes(connection, Arrays.asList(() -> seedNode))); assertFalse(service.nodeConnected(seedNode)); assertTrue(connection.assertNoRunningConnections()); } @@ -461,7 +473,7 @@ public void close() { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { connection.addConnectedNode(seedNode); for (DiscoveryNode node : knownNodes) { final Transport.Connection transportConnection = connection.getConnection(node); @@ -504,7 +516,7 @@ public void run() { CountDownLatch listenerCalled = new CountDownLatch(1); AtomicReference exceptionReference = new AtomicReference<>(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { ActionListener listener = ActionListener.wrap(x -> { listenerCalled.countDown(); fail("expected exception"); @@ -512,7 +524,7 @@ public void run() { exceptionReference.set(x); listenerCalled.countDown(); }); - connection.updateSeedNodes(Arrays.asList(seedNode), listener); + connection.updateSeedNodes(null, Arrays.asList(() -> seedNode), listener); acceptedLatch.await(); connection.close(); // now close it, this should trigger an interrupt on the socket and we can move on assertTrue(connection.assertNoRunningConnections()); @@ -539,9 +551,9 @@ public void testFetchShards() throws Exception { try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { service.start(); service.acceptIncomingRequests(); - List nodes = Collections.singletonList(seedNode); + List> nodes = Collections.singletonList(() -> seedNode); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - nodes, service, Integer.MAX_VALUE, n -> true)) { + nodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { if (randomBoolean()) { updateSeedNodes(connection, nodes); } @@ -579,9 +591,9 @@ public void testFetchShardsThreadContextHeader() throws Exception { try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { service.start(); service.acceptIncomingRequests(); - List nodes = Collections.singletonList(seedNode); + List> nodes = Collections.singletonList(() -> seedNode); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - nodes, service, Integer.MAX_VALUE, n -> true)) { + nodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { SearchRequest request = new SearchRequest("test-index"); Thread[] threads = new Thread[10]; for (int i = 0; i < threads.length; i++) { @@ -635,7 +647,7 @@ public void testFetchShardsSkipUnavailable() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Collections.singletonList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + 
Collections.singletonList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { SearchRequest request = new SearchRequest("test-index"); ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") @@ -738,14 +750,14 @@ public void testTriggerUpdatesConcurrently() throws IOException, InterruptedExce knownNodes.add(discoverableTransport.getLocalDiscoNode()); knownNodes.add(seedTransport1.getLocalDiscoNode()); Collections.shuffle(knownNodes, random()); - List seedNodes = Arrays.asList(seedNode1, seedNode); + List> seedNodes = Arrays.asList(() -> seedNode1, () -> seedNode); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true)) { + seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { int numThreads = randomIntBetween(4, 10); Thread[] threads = new Thread[numThreads]; CyclicBarrier barrier = new CyclicBarrier(numThreads); @@ -782,7 +794,7 @@ public void run() { throw new AssertionError(x); } }); - connection.updateSeedNodes(seedNodes, listener); + connection.updateSeedNodes(null, seedNodes, listener); } latch.await(); } catch (Exception ex) { @@ -816,14 +828,14 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt knownNodes.add(discoverableTransport.getLocalDiscoNode()); knownNodes.add(seedTransport1.getLocalDiscoNode()); Collections.shuffle(knownNodes, random()); - List seedNodes = Arrays.asList(seedNode1, seedNode); + List> seedNodes = Arrays.asList(() -> seedNode1, () -> seedNode); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true)) { + seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { int numThreads = randomIntBetween(4, 10); Thread[] threads = new Thread[numThreads]; CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); @@ -870,7 +882,7 @@ public void run() { } }); try { - connection.updateSeedNodes(seedNodes, listener); + connection.updateSeedNodes(null, seedNodes, listener); } catch (Exception e) { // it's ok if we're shutting down assertThat(e.getMessage(), containsString("threadcontext is already closed")); @@ -904,7 +916,7 @@ public void testGetConnectionInfo() throws Exception { knownNodes.add(transport3.getLocalDiscoNode()); knownNodes.add(transport2.getLocalDiscoNode()); Collections.shuffle(knownNodes, random()); - List seedNodes = Arrays.asList(node3, node1, node2); + List> seedNodes = Arrays.asList(() -> node3, () -> node1, () -> node2); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { @@ -912,7 +924,7 @@ public void testGetConnectionInfo() throws Exception { service.acceptIncomingRequests(); int maxNumConnections = randomIntBetween(1, 5); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, 
service, maxNumConnections, n -> true)) { + seedNodes, service, service.connectionManager(), maxNumConnections, n -> true)) { // test no nodes connected RemoteConnectionInfo remoteConnectionInfo = assertSerialization(connection.getConnectionInfo()); assertNotNull(remoteConnectionInfo); @@ -1059,7 +1071,7 @@ public void testEnsureConnected() throws IOException, InterruptedException { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { assertFalse(service.nodeConnected(seedNode)); assertFalse(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); @@ -1108,9 +1120,9 @@ public void testCollectNodes() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { if (randomBoolean()) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); } CountDownLatch responseLatch = new CountDownLatch(1); AtomicReference> reference = new AtomicReference<>(); @@ -1142,21 +1154,21 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted List discoverableTransports = new CopyOnWriteArrayList<>(); try { final int numDiscoverableNodes = randomIntBetween(5, 20); - List discoverableNodes = new ArrayList<>(numDiscoverableNodes); - for (int i = 0; i < numDiscoverableNodes; i++) { + List> discoverableNodes = new ArrayList<>(numDiscoverableNodes); + for (int i = 0; i < numDiscoverableNodes; i++ ) { MockTransportService transportService = startTransport("discoverable_node" + i, knownNodes, Version.CURRENT); - discoverableNodes.add(transportService.getLocalDiscoNode()); + discoverableNodes.add(transportService::getLocalDiscoNode); discoverableTransports.add(transportService); } - List seedNodes = randomSubsetOf(discoverableNodes); + List> seedNodes = randomSubsetOf(discoverableNodes); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true)) { + seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { final int numGetThreads = randomIntBetween(4, 10); final Thread[] getThreads = new Thread[numGetThreads]; final int numModifyingThreads = randomIntBetween(4, 10); @@ -1198,7 +1210,7 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted discoverableTransports.add(transportService); connection.addConnectedNode(transportService.getLocalDiscoNode()); } else { - DiscoveryNode node = randomFrom(discoverableNodes); + DiscoveryNode node = randomFrom(discoverableNodes).get(); connection.onNodeDisconnected(node); } } @@ -1246,12 +1258,13 @@ public void testClusterNameIsChecked() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection 
connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList( () -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); - List discoveryNodes = Arrays.asList(otherClusterTransport.getLocalDiscoNode(), seedNode); + List> discoveryNodes = + Arrays.asList(() -> otherClusterTransport.getLocalDiscoNode(), () -> seedNode); Collections.shuffle(discoveryNodes, random()); updateSeedNodes(connection, discoveryNodes); assertTrue(service.nodeConnected(seedNode)); @@ -1262,7 +1275,7 @@ public void testClusterNameIsChecked() throws Exception { assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () -> - updateSeedNodes(connection, Arrays.asList(otherClusterTransport.getLocalDiscoNode()))); + updateSeedNodes(connection, Arrays.asList(() -> otherClusterTransport.getLocalDiscoNode()))); assertThat(illegalStateException.getMessage(), startsWith("handshake failed, mismatched cluster name [Cluster [otherCluster]]" + " - {other_cluster_discoverable_node}")); @@ -1325,7 +1338,7 @@ public void close() { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Collections.singletonList(connectedNode), service, Integer.MAX_VALUE, n -> true)) { + Collections.singletonList(() -> connectedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { connection.addConnectedNode(connectedNode); for (int i = 0; i < 10; i++) { //always a direct connection as the remote node is already connected @@ -1333,9 +1346,9 @@ public void close() { assertSame(seedConnection, remoteConnection); } for (int i = 0; i < 10; i++) { - //always a direct connection as the remote node is already connected + // we don't use the transport service connection manager so we will get a proxy connection for the local node Transport.Connection remoteConnection = connection.getConnection(service.getLocalNode()); - assertThat(remoteConnection, not(instanceOf(RemoteClusterConnection.ProxyConnection.class))); + assertThat(remoteConnection, instanceOf(RemoteClusterConnection.ProxyConnection.class)); assertThat(remoteConnection.getNode(), equalTo(service.getLocalNode())); } for (int i = 0; i < 10; i++) { @@ -1348,4 +1361,127 @@ public void close() { } } } + + public void testLazyResolveTransportAddress() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); + MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { + DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); + knownNodes.add(seedTransport.getLocalDiscoNode()); + knownNodes.add(discoverableTransport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + CountDownLatch 
multipleResolveLatch = new CountDownLatch(2); + Supplier seedSupplier = () -> { + multipleResolveLatch.countDown(); + return seedNode; + }; + try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", + Arrays.asList(seedSupplier), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(seedSupplier)); + // Closing connections leads to RemoteClusterConnection.ConnectHandler.collectRemoteNodes + // being called again so we try to resolve the same seed node's host twice + discoverableTransport.close(); + seedTransport.close(); + assertTrue(multipleResolveLatch.await(30L, TimeUnit.SECONDS)); + } + } + } + } + + public void testProxyMode() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService seedTransport = startTransport("node_0", knownNodes, Version.CURRENT); + MockTransportService discoverableTransport = startTransport("node_1", knownNodes, Version.CURRENT)) { + knownNodes.add(seedTransport.getLocalDiscoNode()); + knownNodes.add(discoverableTransport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + final String proxyAddress = "1.1.1.1:99"; + Map nodes = new HashMap<>(); + nodes.put("node_0", seedTransport.getLocalDiscoNode()); + nodes.put("node_1", discoverableTransport.getLocalDiscoNode()); + Transport mockTcpTransport = getProxyTransport(threadPool, Collections.singletonMap(proxyAddress, nodes)); + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, mockTcpTransport, Version.CURRENT, + threadPool, null, Collections.emptySet())) { + service.start(); + service.acceptIncomingRequests(); + Supplier seedSupplier = () -> + RemoteClusterAware.buildSeedNode("some-remote-cluster", "node_0:" + randomIntBetween(1, 10000), true); + try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", + Arrays.asList(seedSupplier), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, proxyAddress)) { + updateSeedNodes(connection, Arrays.asList(seedSupplier), proxyAddress); + assertEquals(2, connection.getNumNodesConnected()); + assertNotNull(connection.getConnection(discoverableTransport.getLocalDiscoNode())); + assertNotNull(connection.getConnection(seedTransport.getLocalDiscoNode())); + assertEquals(proxyAddress, connection.getConnection(seedTransport.getLocalDiscoNode()) + .getNode().getAddress().toString()); + assertEquals(proxyAddress, connection.getConnection(discoverableTransport.getLocalDiscoNode()) + .getNode().getAddress().toString()); + service.getConnectionManager().disconnectFromNode(knownNodes.get(0)); + // ensure we reconnect + assertBusy(() -> { + assertEquals(2, connection.getNumNodesConnected()); + }); + discoverableTransport.close(); + seedTransport.close(); + } + } + } + } + + public static Transport getProxyTransport(ThreadPool threadPool, Map> nodeMap) { + if (nodeMap.isEmpty()) { + throw new IllegalArgumentException("nodeMap must be non-empty"); + } + + StubbableTransport stubbableTransport = new StubbableTransport(MockTransportService.newMockTransport(Settings.EMPTY, Version + .CURRENT, threadPool)); + stubbableTransport.setDefaultConnectBehavior((t, node, profile) -> { + Map proxyMapping = nodeMap.get(node.getAddress().toString()); + if (proxyMapping == null) { + throw new IllegalStateException("no proxy mapping for node: " + node); + } + DiscoveryNode proxyNode = proxyMapping.get(node.getName()); + if (proxyNode == null) { 
+ // this is a seednode - lets pick one randomly + assertEquals("seed node must not have a port in the hostname: " + node.getHostName(), + -1, node.getHostName().lastIndexOf(':')); + assertTrue("missing hostname: " + node, proxyMapping.containsKey(node.getHostName())); + // route by seed hostname + proxyNode = proxyMapping.get(node.getHostName()); + } + Transport.Connection connection = t.openConnection(proxyNode, profile); + return new Transport.Connection() { + @Override + public DiscoveryNode getNode() { + return node; + } + + @Override + public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws IOException, TransportException { + connection.sendRequest(requestId, action, request, options); + } + + @Override + public void addCloseListener(ActionListener listener) { + connection.addCloseListener(listener); + } + + @Override + public boolean isClosed() { + return connection.isClosed(); + } + + @Override + public void close() { + connection.close(); + } + }; + }); + return stubbableTransport; + } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 03d76b5a953c6..9d42b4e458dbe 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.transport; +import java.util.function.Supplier; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; @@ -25,6 +26,7 @@ import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.AbstractScopedSettings; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -54,6 +56,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.function.Predicate; +import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; @@ -103,27 +106,49 @@ public void testRemoteClusterSeedSetting() { .put("search.remote.foo.seeds", "192.168.0.1").build(); expectThrows(IllegalArgumentException.class, () -> RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings).forEach(setting -> setting.get(brokenSettings))); + + Settings brokenPortSettings = Settings.builder() + .put("search.remote.foo.seeds", "192.168.0.1:123456789123456789").build(); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings) + .forEach(setting -> setting.get(brokenPortSettings)) + ); + assertEquals("failed to parse port", e.getMessage()); } - public void testBuiltRemoteClustersSeeds() throws Exception { - Map> map = RemoteClusterService.buildRemoteClustersSeeds( - Settings.builder().put("search.remote.foo.seeds", "192.168.0.1:8080").put("search.remote.bar.seeds", "[::1]:9090").build()); - assertEquals(2, map.size()); + public void testBuildRemoteClustersDynamicConfig() throws Exception { + Map>>> map = RemoteClusterService.buildRemoteClustersDynamicConfig( + 
Settings.builder().put("search.remote.foo.seeds", "192.168.0.1:8080") + .put("search.remote.bar.seeds", "[::1]:9090") + .put("search.remote.boom.seeds", "boom-node1.internal:1000") + .put("search.remote.boom.proxy", "foo.bar.com:1234").build()); + assertEquals(3, map.size()); assertTrue(map.containsKey("foo")); assertTrue(map.containsKey("bar")); - assertEquals(1, map.get("foo").size()); - assertEquals(1, map.get("bar").size()); - - DiscoveryNode foo = map.get("foo").get(0); + assertTrue(map.containsKey("boom")); + assertEquals(1, map.get("foo").v2().size()); + assertEquals(1, map.get("bar").v2().size()); + assertEquals(1, map.get("boom").v2().size()); + DiscoveryNode foo = map.get("foo").v2().get(0).get(); + assertEquals("", map.get("foo").v1()); assertEquals(foo.getAddress(), new TransportAddress(new InetSocketAddress(InetAddress.getByName("192.168.0.1"), 8080))); assertEquals(foo.getId(), "foo#192.168.0.1:8080"); assertEquals(foo.getVersion(), Version.CURRENT.minimumCompatibilityVersion()); - DiscoveryNode bar = map.get("bar").get(0); + DiscoveryNode bar = map.get("bar").v2().get(0).get(); assertEquals(bar.getAddress(), new TransportAddress(new InetSocketAddress(InetAddress.getByName("[::1]"), 9090))); assertEquals(bar.getId(), "bar#[::1]:9090"); + assertEquals("", map.get("bar").v1()); assertEquals(bar.getVersion(), Version.CURRENT.minimumCompatibilityVersion()); + + DiscoveryNode boom = map.get("boom").v2().get(0).get(); + assertEquals(boom.getAddress(), new TransportAddress(TransportAddress.META_ADDRESS, 0)); + assertEquals("boom-node1.internal", boom.getHostName()); + assertEquals(boom.getId(), "boom#boom-node1.internal:1000"); + assertEquals("foo.bar.com:1234", map.get("boom").v1()); + assertEquals(boom.getVersion(), Version.CURRENT.minimumCompatibilityVersion()); } @@ -194,17 +219,17 @@ public void testIncrementallyAddClusters() throws IOException { assertFalse(service.isCrossClusterSearchEnabled()); service.initializeRemoteClusters(); assertFalse(service.isCrossClusterSearchEnabled()); - service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().address())); + service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().toString()), null); assertTrue(service.isCrossClusterSearchEnabled()); assertTrue(service.isRemoteClusterRegistered("cluster_1")); - service.updateRemoteCluster("cluster_2", Collections.singletonList(otherSeedNode.getAddress().address())); + service.updateRemoteCluster("cluster_2", Collections.singletonList(otherSeedNode.getAddress().toString()), null); assertTrue(service.isCrossClusterSearchEnabled()); assertTrue(service.isRemoteClusterRegistered("cluster_1")); assertTrue(service.isRemoteClusterRegistered("cluster_2")); - service.updateRemoteCluster("cluster_2", Collections.emptyList()); + service.updateRemoteCluster("cluster_2", Collections.emptyList(), null); assertFalse(service.isRemoteClusterRegistered("cluster_2")); IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, - () -> service.updateRemoteCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, Collections.emptyList())); + () -> service.updateRemoteCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, Collections.emptyList(), null)); assertEquals("remote clusters must not have the empty string as its key", iae.getMessage()); } } @@ -252,22 +277,17 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException { service.initializeRemoteClusters(); assertFalse(service.isCrossClusterSearchEnabled()); - final 
InetSocketAddress c1N1Address = c1N1Node.getAddress().address(); - final InetSocketAddress c1N2Address = c1N2Node.getAddress().address(); - final InetSocketAddress c2N1Address = c2N1Node.getAddress().address(); - final InetSocketAddress c2N2Address = c2N2Node.getAddress().address(); - final CountDownLatch firstLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_1", - Arrays.asList(c1N1Address, c1N2Address), + Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), null, connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", - Arrays.asList(c2N1Address, c2N2Address), + Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), null, connectionListener(secondLatch)); secondLatch.await(); @@ -278,6 +298,7 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException { assertTrue(service.isRemoteClusterRegistered("cluster_2")); assertFalse(service.isRemoteNodeConnected("cluster_2", c2N1Node)); assertTrue(service.isRemoteNodeConnected("cluster_2", c2N2Node)); + assertEquals(0, transportService.getConnectionManager().size()); } } } @@ -321,22 +342,17 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { service.initializeRemoteClusters(); assertFalse(service.isCrossClusterSearchEnabled()); - final InetSocketAddress c1N1Address = c1N1Node.getAddress().address(); - final InetSocketAddress c1N2Address = c1N2Node.getAddress().address(); - final InetSocketAddress c2N1Address = c2N1Node.getAddress().address(); - final InetSocketAddress c2N2Address = c2N2Node.getAddress().address(); - final CountDownLatch firstLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_1", - Arrays.asList(c1N1Address, c1N2Address), + Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), null, connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", - Arrays.asList(c2N1Address, c2N2Address), + Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), null, connectionListener(secondLatch)); secondLatch.await(); @@ -347,6 +363,7 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { assertTrue(service.isRemoteClusterRegistered("cluster_2")); assertFalse(service.isRemoteNodeConnected("cluster_2", c2N1Node)); assertTrue(service.isRemoteNodeConnected("cluster_2", c2N2Node)); + assertEquals(0, transportService.getConnectionManager().size()); } } } @@ -398,22 +415,17 @@ public void testCollectNodes() throws InterruptedException, IOException { service.initializeRemoteClusters(); assertFalse(service.isCrossClusterSearchEnabled()); - final InetSocketAddress c1N1Address = c1N1Node.getAddress().address(); - final InetSocketAddress c1N2Address = c1N2Node.getAddress().address(); - final InetSocketAddress c2N1Address = c2N1Node.getAddress().address(); - final InetSocketAddress c2N2Address = c2N2Node.getAddress().address(); - final CountDownLatch firstLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_1", - Arrays.asList(c1N1Address, c1N2Address), + Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), null, connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", - Arrays.asList(c2N1Address, c2N2Address), 
+ Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), null, connectionListener(secondLatch)); secondLatch.await(); CountDownLatch latch = new CountDownLatch(1); @@ -584,14 +596,16 @@ public void testCollectSearchShards() throws Exception { } CountDownLatch disconnectedLatch = new CountDownLatch(numDisconnectedClusters); - service.addConnectionListener(new TransportConnectionListener() { - @Override - public void onNodeDisconnected(DiscoveryNode node) { - if (disconnectedNodes.remove(node)) { - disconnectedLatch.countDown(); + for (RemoteClusterConnection connection : remoteClusterService.getConnections()) { + connection.getConnectionManager().addListener(new TransportConnectionListener() { + @Override + public void onNodeDisconnected(DiscoveryNode node) { + if (disconnectedNodes.remove(node)) { + disconnectedLatch.countDown(); + } } - } - }); + }); + } for (DiscoveryNode disconnectedNode : disconnectedNodes) { service.addFailToSendNoConnectRule(disconnectedNode.getAddress()); @@ -669,6 +683,7 @@ public void onNodeDisconnected(DiscoveryNode node) { assertTrue(shardsResponse != ClusterSearchShardsResponse.EMPTY); } } + assertEquals(0, service.getConnectionManager().size()); } } } finally { @@ -821,10 +836,77 @@ public void testGetNodePredicatesCombination() { allRoles, Version.CURRENT); assertTrue(nodePredicate.test(node)); } - { - DiscoveryNode node = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "true"), - allRoles, Version.V_5_3_0); - assertFalse(nodePredicate.test(node)); + } + + public void testRemoteClusterWithProxy() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService cluster_1_node0 = startTransport("cluster_1_node0", knownNodes, Version.CURRENT); + MockTransportService cluster_1_node_1 = startTransport("cluster_1_node1", knownNodes, Version.CURRENT); + MockTransportService cluster_2_node0 = startTransport("cluster_2_node0", Collections.emptyList(), Version.CURRENT)) { + knownNodes.add(cluster_1_node0.getLocalDiscoNode()); + knownNodes.add(cluster_1_node_1.getLocalDiscoNode()); + String cluster1Proxy = "1.1.1.1:99"; + String cluster2Proxy = "2.2.2.2:99"; + Map nodesCluster1 = new HashMap<>(); + nodesCluster1.put("cluster_1_node0", cluster_1_node0.getLocalDiscoNode()); + nodesCluster1.put("cluster_1_node1", cluster_1_node_1.getLocalDiscoNode()); + Map> mapping = new HashMap<>(); + mapping.put(cluster1Proxy, nodesCluster1); + mapping.put(cluster2Proxy, Collections.singletonMap("cluster_2_node0", cluster_2_node0.getLocalDiscoNode())); + + Collections.shuffle(knownNodes, random()); + Transport proxyTransport = RemoteClusterConnectionTests.getProxyTransport(threadPool, mapping); + try (MockTransportService transportService = MockTransportService.createNewService(Settings.EMPTY, proxyTransport, + Version.CURRENT, threadPool, null, Collections.emptySet());) { + transportService.start(); + transportService.acceptIncomingRequests(); + Settings.Builder builder = Settings.builder(); + builder.putList("search.remote.cluster_1.seeds", "cluster_1_node0:8080"); + builder.put("search.remote.cluster_1.proxy", cluster1Proxy); + try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) { + assertFalse(service.isCrossClusterSearchEnabled()); + service.initializeRemoteClusters(); + assertTrue(service.isCrossClusterSearchEnabled()); + updateRemoteCluster(service, "cluster_1", Collections.singletonList("cluster_1_node1:8081"), cluster1Proxy); + 
assertTrue(service.isCrossClusterSearchEnabled()); + assertTrue(service.isRemoteClusterRegistered("cluster_1")); + assertFalse(service.isRemoteClusterRegistered("cluster_2")); + updateRemoteCluster(service, "cluster_2", Collections.singletonList("cluster_2_node0:9300"), cluster2Proxy); + assertTrue(service.isCrossClusterSearchEnabled()); + assertTrue(service.isRemoteClusterRegistered("cluster_1")); + assertTrue(service.isRemoteClusterRegistered("cluster_2")); + List infos = service.getRemoteConnectionInfos().collect(Collectors.toList()); + for (RemoteConnectionInfo info : infos) { + switch (info.clusterAlias) { + case "cluster_1": + assertEquals(2, info.numNodesConnected); + break; + case "cluster_2": + assertEquals(1, info.numNodesConnected); + break; + default: + fail("unknown cluster: " + info.clusterAlias); + } + } + service.updateRemoteCluster("cluster_2", Collections.emptyList(), randomBoolean() ? cluster2Proxy : null); + assertFalse(service.isRemoteClusterRegistered("cluster_2")); + } + } + } + } + + private void updateRemoteCluster(RemoteClusterService service, String clusterAlias, List addresses, String proxyAddress) + throws Exception { + CountDownLatch latch = new CountDownLatch(1); + AtomicReference exceptionAtomicReference = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap(x -> latch.countDown(), x -> { + exceptionAtomicReference.set(x); + latch.countDown(); + }); + service.updateRemoteCluster(clusterAlias, addresses, proxyAddress, listener); + latch.await(); + if (exceptionAtomicReference.get() != null) { + throw exceptionAtomicReference.get(); } } } diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index a3d2e1bbc574e..0bf12ba82c821 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -156,19 +156,26 @@ public void testEnsureVersionCompatibility() { TcpTransport.ensureVersionCompatibility(VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT), Version.CURRENT, randomBoolean()); - TcpTransport.ensureVersionCompatibility(Version.fromString("5.0.0"), Version.fromString("6.0.0"), true); + TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), Version.fromString("7.0.0"), true); IllegalStateException ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("5.0.0"), Version.fromString("6.0.0"), false)); - assertEquals("Received message from unsupported version: [5.0.0] minimal compatible version is: [5.6.0]", ise.getMessage()); + TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), Version.fromString("7.0.0"), false)); + assertEquals("Received message from unsupported version: [6.0.0] minimal compatible version is: [6.5.0]", ise.getMessage()); + // For handshake we are compatible with N-2 + TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), Version.fromString("7.0.0"), true); ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("6.0.0"), true)); - assertEquals("Received handshake message from unsupported version: [2.3.0] minimal compatible version is: [5.6.0]", + TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), Version.fromString("7.0.0"), false)); + assertEquals("Received 
message from unsupported version: [5.6.0] minimal compatible version is: [6.5.0]", ise.getMessage()); ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("6.0.0"), false)); - assertEquals("Received message from unsupported version: [2.3.0] minimal compatible version is: [5.6.0]", + TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("7.0.0"), true)); + assertEquals("Received handshake message from unsupported version: [2.3.0] minimal compatible version is: [6.5.0]", + ise.getMessage()); + + ise = expectThrows(IllegalStateException.class, () -> + TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("7.0.0"), false)); + assertEquals("Received message from unsupported version: [2.3.0] minimal compatible version is: [6.5.0]", ise.getMessage()); } @@ -188,7 +195,7 @@ protected FakeChannel bind(String name, InetSocketAddress address) throws IOExce } @Override - protected FakeChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + protected FakeChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException { return new FakeChannel(messageCaptor); } diff --git a/server/src/test/resources/org/elasticsearch/search/query/all-query-index.json b/server/src/test/resources/org/elasticsearch/search/query/all-query-index.json index abdc11928229f..9ab8995813e33 100644 --- a/server/src/test/resources/org/elasticsearch/search/query/all-query-index.json +++ b/server/src/test/resources/org/elasticsearch/search/query/all-query-index.json @@ -46,10 +46,6 @@ "format": "yyyy/MM/dd||epoch_millis" }, "f_bool": {"type": "boolean"}, - "f_bool_alias": { - "type": "alias", - "path": "f_bool" - }, "f_byte": {"type": "byte"}, "f_short": {"type": "short"}, "f_int": {"type": "integer"}, @@ -60,6 +56,10 @@ "f_binary": {"type": "binary"}, "f_suggest": {"type": "completion"}, "f_geop": {"type": "geo_point"}, + "f_geop_alias": { + "type": "alias", + "path": "f_geop" + }, "f_geos": {"type": "geo_shape"} } } diff --git a/settings.gradle b/settings.gradle index bdd866e622bcd..dedf3520bbbcd 100644 --- a/settings.gradle +++ b/settings.gradle @@ -78,12 +78,18 @@ addSubProjects('', new File(rootProject.projectDir, 'plugins')) addSubProjects('', new File(rootProject.projectDir, 'qa')) addSubProjects('', new File(rootProject.projectDir, 'x-pack')) -boolean isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse') +List startTasks = gradle.startParameter.taskNames +boolean isEclipse = + System.getProperty("eclipse.launcher") != null || // Detects gradle launched from the Eclipse IDE + System.getProperty("eclipse.application") != null || // Detects gradle launched from the Eclipse compiler server + startTasks.contains("eclipse") || // Detects gradle launched from the command line to do Eclipse stuff + startTasks.contains("cleanEclipse"); if (isEclipse) { // eclipse cannot handle an intermediate dependency between main and test, so we must create separate projects // for server-src and server-tests projects << 'server-tests' projects << 'libs:core-tests' + projects << 'libs:dissect-tests' projects << 'libs:nio-tests' projects << 'libs:x-content-tests' projects << 'libs:secure-sm-tests' @@ -103,6 +109,10 @@ if (isEclipse) { project(":libs:core").buildFileName = 'eclipse-build.gradle' 
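Editor's aside on the TcpTransportTests hunk above: the updated assertions expect regular messages below the minimum compatible version to be rejected, while handshake messages are tolerated from further back (the test comment calls this N-2 compatibility). The following is a minimal sketch of that kind of two-floor check using made-up version numbers and a simplified Ver type; it is not the actual TcpTransport.ensureVersionCompatibility implementation.

```java
// Illustrative sketch only: a simplified version gate in the spirit of the assertions above.
// The concrete version values and the lower "handshake floor" are assumptions for demonstration.
public final class VersionGateSketch {

    static final class Ver implements Comparable<Ver> {
        final int major, minor;
        Ver(int major, int minor) { this.major = major; this.minor = minor; }
        public int compareTo(Ver o) { return major != o.major ? Integer.compare(major, o.major) : Integer.compare(minor, o.minor); }
        public String toString() { return major + "." + minor; }
    }

    // Regular messages must meet the minimum compatible version; handshakes get a lower floor,
    // so a node from an older series can at least be told that it is too old.
    static void ensureCompatible(Ver remote, Ver minCompatible, Ver handshakeFloor, boolean isHandshake) {
        Ver floor = isHandshake ? handshakeFloor : minCompatible;
        if (remote.compareTo(floor) < 0) {
            throw new IllegalStateException("Received " + (isHandshake ? "handshake " : "")
                + "message from unsupported version: [" + remote + "] minimal compatible version is: [" + minCompatible + "]");
        }
    }

    public static void main(String[] args) {
        Ver minCompatible = new Ver(6, 5);   // example floor for regular messages
        Ver handshakeFloor = new Ver(5, 6);  // example lower floor used only for handshakes
        ensureCompatible(new Ver(5, 6), minCompatible, handshakeFloor, true);       // accepted
        try {
            ensureCompatible(new Ver(5, 6), minCompatible, handshakeFloor, false);  // rejected
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());
        }
    }
}
```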
project(":libs:core-tests").projectDir = new File(rootProject.projectDir, 'libs/core/src/test') project(":libs:core-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:dissect").projectDir = new File(rootProject.projectDir, 'libs/dissect/src/main') + project(":libs:dissect").buildFileName = 'eclipse-build.gradle' + project(":libs:dissect-tests").projectDir = new File(rootProject.projectDir, 'libs/dissect/src/test') + project(":libs:dissect-tests").buildFileName = 'eclipse-build.gradle' project(":libs:nio").projectDir = new File(rootProject.projectDir, 'libs/nio/src/main') project(":libs:nio").buildFileName = 'eclipse-build.gradle' project(":libs:nio-tests").projectDir = new File(rootProject.projectDir, 'libs/nio/src/test') diff --git a/test/framework/build.gradle b/test/framework/build.gradle index ab513a1b0bb21..8179e3d096a1f 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks; - dependencies { compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" compile "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}" @@ -41,9 +38,7 @@ compileTestJava.options.compilerArgs << '-Xlint:-rawtypes' // the main files are actually test files, so use the appropriate forbidden api sigs forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), - PrecommitTasks.getResource('/forbidden/es-all-signatures.txt'), - PrecommitTasks.getResource('/forbidden/es-test-signatures.txt')] + replaceSignatureFiles 'jdk-signatures', 'es-all-signatures', 'es-test-signatures' } // TODO: should we have licenses for our test deps? diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index 35dac2e99e00d..c50e7cf066b81 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -177,8 +177,11 @@ public boolean implies(ProtectionDomain domain, Permission permission) { private static void addClassCodebase(Map codebases, String name, String classname) { try { Class clazz = BootstrapForTesting.class.getClassLoader().loadClass(classname); - if (codebases.put(name, clazz.getProtectionDomain().getCodeSource().getLocation()) != null) { - throw new IllegalStateException("Already added " + name + " codebase for testing"); + URL location = clazz.getProtectionDomain().getCodeSource().getLocation(); + if (location.toString().endsWith(".jar") == false) { + if (codebases.put(name, location) != null) { + throw new IllegalStateException("Already added " + name + " codebase for testing"); + } } } catch (ClassNotFoundException e) { // no class, fall through to not add. 
this can happen for any tests that do not include diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 3c8f3497871b4..b558cd1ba9000 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -478,7 +478,7 @@ private InternalEngine createEngine(@Nullable IndexWriterFactory indexWriterFact } InternalEngine internalEngine = createInternalEngine(indexWriterFactory, localCheckpointTrackerSupplier, seqNoForOperation, config); - internalEngine.recoverFromTranslog(); + internalEngine.recoverFromTranslog(Long.MAX_VALUE); return internalEngine; } @@ -808,7 +808,7 @@ public static Set getDocIds(Engine engine, boolean refresh) throws IOExc public static List readAllOperationsInLucene(Engine engine, MapperService mapper) throws IOException { final List operations = new ArrayList<>(); long maxSeqNo = Math.max(0, ((InternalEngine)engine).getLocalCheckpointTracker().getMaxSeqNo()); - try (Translog.Snapshot snapshot = engine.newLuceneChangesSnapshot("test", mapper, 0, maxSeqNo, false)) { + try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapper, 0, maxSeqNo, false)) { Translog.Operation op; while ((op = snapshot.next()) != null){ operations.add(op); diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 1e6f8a17ea0fe..f2afdff9c3a3a 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -37,6 +37,7 @@ import org.elasticsearch.action.resync.ResyncReplicationResponse; import org.elasticsearch.action.resync.TransportResyncReplicationAction; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.ReplicationRequest; @@ -206,26 +207,23 @@ public int appendDocs(final int numOfDoc) throws Exception { } public BulkItemResponse index(IndexRequest indexRequest) throws Exception { - PlainActionFuture listener = new PlainActionFuture<>(); - final ActionListener wrapBulkListener = ActionListener.wrap( - bulkShardResponse -> listener.onResponse(bulkShardResponse.getResponses()[0]), - listener::onFailure); - BulkItemRequest[] items = new BulkItemRequest[1]; - items[0] = new BulkItemRequest(0, indexRequest); - BulkShardRequest request = new BulkShardRequest(shardId, indexRequest.getRefreshPolicy(), items); - new IndexingAction(request, wrapBulkListener, this).execute(); - return listener.get(); + return executeWriteRequest(indexRequest, indexRequest.getRefreshPolicy()); } public BulkItemResponse delete(DeleteRequest deleteRequest) throws Exception { + return executeWriteRequest(deleteRequest, deleteRequest.getRefreshPolicy()); + } + + private BulkItemResponse executeWriteRequest( + DocWriteRequest writeRequest, WriteRequest.RefreshPolicy refreshPolicy) throws Exception { PlainActionFuture listener = new PlainActionFuture<>(); final ActionListener wrapBulkListener = 
ActionListener.wrap( bulkShardResponse -> listener.onResponse(bulkShardResponse.getResponses()[0]), listener::onFailure); BulkItemRequest[] items = new BulkItemRequest[1]; - items[0] = new BulkItemRequest(0, deleteRequest); - BulkShardRequest request = new BulkShardRequest(shardId, deleteRequest.getRefreshPolicy(), items); - new IndexingAction(request, wrapBulkListener, this).execute(); + items[0] = new BulkItemRequest(0, writeRequest); + BulkShardRequest request = new BulkShardRequest(shardId, refreshPolicy, items); + new WriteReplicationAction(request, wrapBulkListener, this).execute(); return listener.get(); } @@ -623,9 +621,9 @@ public void respond(ActionListener listener) { } - class IndexingAction extends ReplicationAction { + class WriteReplicationAction extends ReplicationAction { - IndexingAction(BulkShardRequest request, ActionListener listener, ReplicationGroup replicationGroup) { + WriteReplicationAction(BulkShardRequest request, ActionListener listener, ReplicationGroup replicationGroup) { super(request, listener, replicationGroup, "indexing"); } @@ -753,7 +751,7 @@ protected PrimaryResult performOnPrimary(IndexShard primary, ResyncReplicationRe @Override protected void performOnReplica(ResyncReplicationRequest request, IndexShard replica) throws Exception { - executeResyncOnReplica(replica, request); + executeResyncOnReplica(replica, request, getPrimaryShard().getPendingPrimaryTerm(), getPrimaryShard().getGlobalCheckpoint()); } } @@ -766,8 +764,15 @@ private TransportWriteAction.WritePrimaryResult acquirePermitFuture = new PlainActionFuture<>(); + replica.acquireReplicaOperationPermit( + operationPrimaryTerm, globalCheckpointOnPrimary, acquirePermitFuture, ThreadPool.Names.SAME, request); + try (Releasable ignored = acquirePermitFuture.actionGet()) { + location = TransportResyncReplicationAction.performOnReplica(request, replica); + } TransportWriteActionTestHelper.performPostWriteActions(replica, request, location, logger); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 2f4a3dfd6c12c..375e74f6cca3e 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.uid.Versions; @@ -156,7 +157,6 @@ public Settings threadPoolSettings() { return Settings.EMPTY; } - protected Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException { return createStore(shardPath.getShardId(), indexSettings, newFSDirectory(shardPath.resolveIndex())); } @@ -169,7 +169,6 @@ public Directory newDirectory() throws IOException { } }; return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); - } /** @@ -179,7 +178,17 @@ public Directory newDirectory() throws IOException { * another shard) */ protected IndexShard newShard(boolean primary) throws IOException { - return newShard(primary, Settings.EMPTY, new InternalEngineFactory()); + return newShard(primary, Settings.EMPTY); + } + + /** + * Creates a new 
initializing shard. The shard will have its own unique data path. + * + * @param primary indicates whether to a primary shard (ready to recover from an empty store) or a replica (ready to recover from + * another shard) + */ + protected IndexShard newShard(final boolean primary, final Settings settings) throws IOException { + return newShard(primary, settings, new InternalEngineFactory()); } /** @@ -318,23 +327,25 @@ protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, * @param routing shard routing to use * @param shardPath path to use for shard data * @param indexMetaData indexMetaData for the shard, including any mapping - * @param store an optional custom store to use. If null a default file based store will be created + * @param storeProvider an optional custom store provider to use. If null a default file based store will be created * @param indexSearcherWrapper an optional wrapper to be used during searchers * @param globalCheckpointSyncer callback for syncing global checkpoints * @param indexEventListener index event listener * @param listeners an optional set of listeners to add to the shard */ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetaData indexMetaData, - @Nullable Store store, @Nullable IndexSearcherWrapper indexSearcherWrapper, + @Nullable CheckedFunction storeProvider, + @Nullable IndexSearcherWrapper indexSearcherWrapper, @Nullable EngineFactory engineFactory, Runnable globalCheckpointSyncer, IndexEventListener indexEventListener, IndexingOperationListener... listeners) throws IOException { final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings); final IndexShard indexShard; - if (store == null) { - store = createStore(indexSettings, shardPath); + if (storeProvider == null) { + storeProvider = is -> createStore(is, shardPath); } + final Store store = storeProvider.apply(indexSettings); boolean success = false; try { IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null); @@ -424,7 +435,18 @@ protected IndexShard newStartedShard(final boolean primary) throws IOException { */ protected IndexShard newStartedShard( final boolean primary, final Settings settings, final EngineFactory engineFactory) throws IOException { - IndexShard shard = newShard(primary, settings, engineFactory); + return newStartedShard(p -> newShard(p, settings, engineFactory), primary); + } + + /** + * creates a new empty shard and starts it. + * + * @param shardFunction shard factory function + * @param primary controls whether the shard will be a primary or a replica. 
+ */ + protected IndexShard newStartedShard(CheckedFunction shardFunction, + boolean primary) throws IOException { + IndexShard shard = shardFunction.apply(primary); if (primary) { recoverShardFromStore(shard); } else { diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index 0d340a91d4cea..0ee5798efb30b 100644 --- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -96,6 +96,14 @@ public void execute(Map ctx) { } }; return context.factoryClazz.cast(factory); + } else if (context.instanceClazz.equals(IngestConditionalScript.class)) { + IngestConditionalScript.Factory factory = parameters -> new IngestConditionalScript(parameters) { + @Override + public boolean execute(Map ctx) { + return (boolean) script.apply(ctx); + } + }; + return context.factoryClazz.cast(factory); } else if (context.instanceClazz.equals(UpdateScript.class)) { UpdateScript.Factory factory = parameters -> new UpdateScript(parameters) { @Override @@ -242,16 +250,18 @@ public MovingFunctionScript createMovingFunctionScript() { return new MockMovingFunctionScript(); } - public ScriptedMetricAggContexts.InitScript createMetricAggInitScript(Map params, Object state) { + public ScriptedMetricAggContexts.InitScript createMetricAggInitScript(Map params, Map state) { return new MockMetricAggInitScript(params, state, script != null ? script : ctx -> 42d); } - public ScriptedMetricAggContexts.MapScript.LeafFactory createMetricAggMapScript(Map params, Object state, + public ScriptedMetricAggContexts.MapScript.LeafFactory createMetricAggMapScript(Map params, + Map state, SearchLookup lookup) { return new MockMetricAggMapScript(params, state, lookup, script != null ? script : ctx -> 42d); } - public ScriptedMetricAggContexts.CombineScript createMetricAggCombineScript(Map params, Object state) { + public ScriptedMetricAggContexts.CombineScript createMetricAggCombineScript(Map params, + Map state) { return new MockMetricAggCombineScript(params, state, script != null ? 
script : ctx -> 42d); } @@ -415,7 +425,7 @@ public double execute(Query query, Field field, Term term) throws IOException { public static class MockMetricAggInitScript extends ScriptedMetricAggContexts.InitScript { private final Function, Object> script; - MockMetricAggInitScript(Map params, Object state, + MockMetricAggInitScript(Map params, Map state, Function, Object> script) { super(params, state); this.script = script; @@ -436,11 +446,11 @@ public void execute() { public static class MockMetricAggMapScript implements ScriptedMetricAggContexts.MapScript.LeafFactory { private final Map params; - private final Object state; + private final Map state; private final SearchLookup lookup; private final Function, Object> script; - MockMetricAggMapScript(Map params, Object state, SearchLookup lookup, + MockMetricAggMapScript(Map params, Map state, SearchLookup lookup, Function, Object> script) { this.params = params; this.state = state; @@ -473,7 +483,7 @@ public void execute() { public static class MockMetricAggCombineScript extends ScriptedMetricAggContexts.CombineScript { private final Function, Object> script; - MockMetricAggCombineScript(Map params, Object state, + MockMetricAggCombineScript(Map params, Map state, Function, Object> script) { super(params, state); this.script = script; diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java index 6ec32f6654fff..5aeb30bfdbd5d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ + package org.elasticsearch.test; import org.elasticsearch.common.Strings; @@ -34,9 +35,17 @@ public abstract class AbstractSerializingTestCase { - IndicesStatsResponse stats = client().admin().indices().prepareStats().clear().get(); - for (IndexStats indexStats : stats.getIndices().values()) { - for (IndexShardStats indexShardStats : indexStats.getIndexShards().values()) { - Optional maybePrimary = Stream.of(indexShardStats.getShards()) - .filter(s -> s.getShardRouting().active() && s.getShardRouting().primary()) - .findFirst(); - if (maybePrimary.isPresent() == false) { + final ClusterState state = clusterService().state(); + for (ObjectObjectCursor indexRoutingTable : state.routingTable().indicesRouting()) { + for (IntObjectCursor indexShardRoutingTable : indexRoutingTable.value.shards()) { + ShardRouting primaryShardRouting = indexShardRoutingTable.value.primaryShard(); + if (primaryShardRouting == null || primaryShardRouting.assignedToNode() == false) { continue; } - ShardStats primary = maybePrimary.get(); - final SeqNoStats primarySeqNoStats = primary.getSeqNoStats(); - final ShardRouting primaryShardRouting = primary.getShardRouting(); + DiscoveryNode primaryNode = state.nodes().get(primaryShardRouting.currentNodeId()); + IndexShard primaryShard = internalCluster().getInstance(IndicesService.class, primaryNode.getName()) + .indexServiceSafe(primaryShardRouting.index()).getShard(primaryShardRouting.id()); + final SeqNoStats primarySeqNoStats; + final ObjectLongMap syncGlobalCheckpoints; + try { + primarySeqNoStats = primaryShard.seqNoStats(); + syncGlobalCheckpoints = primaryShard.getInSyncGlobalCheckpoints(); + } catch (AlreadyClosedException ex) { + continue; // shard is closed - just ignore + } assertThat(primaryShardRouting + " should have set the global checkpoint", - primarySeqNoStats.getGlobalCheckpoint(), not(equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO))); - final DiscoveryNode node = clusterService().state().nodes().get(primaryShardRouting.currentNodeId()); - final IndicesService indicesService = - internalCluster().getInstance(IndicesService.class, node.getName()); - final IndexShard indexShard = indicesService.getShardOrNull(primaryShardRouting.shardId()); - final ObjectLongMap globalCheckpoints = indexShard.getInSyncGlobalCheckpoints(); - for (ShardStats shardStats : indexShardStats) { - final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); - assertThat(shardStats.getShardRouting() + " local checkpoint mismatch", - seqNoStats.getLocalCheckpoint(), equalTo(primarySeqNoStats.getLocalCheckpoint())); - assertThat(shardStats.getShardRouting() + " global checkpoint mismatch", - seqNoStats.getGlobalCheckpoint(), equalTo(primarySeqNoStats.getGlobalCheckpoint())); - assertThat(shardStats.getShardRouting() + " max seq no mismatch", - seqNoStats.getMaxSeqNo(), equalTo(primarySeqNoStats.getMaxSeqNo())); + primarySeqNoStats.getGlobalCheckpoint(), not(equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO))); + for (ShardRouting replicaShardRouting : indexShardRoutingTable.value.replicaShards()) { + if (replicaShardRouting.assignedToNode() == false) { + continue; + } + DiscoveryNode replicaNode = state.nodes().get(replicaShardRouting.currentNodeId()); + IndexShard replicaShard = internalCluster().getInstance(IndicesService.class, replicaNode.getName()) + .indexServiceSafe(replicaShardRouting.index()).getShard(replicaShardRouting.id()); + final SeqNoStats seqNoStats; + try { + seqNoStats = replicaShard.seqNoStats(); + } catch (AlreadyClosedException e) { + continue; // shard is closed - just 
ignore + } + assertThat(replicaShardRouting + " local checkpoint mismatch", + seqNoStats.getLocalCheckpoint(), equalTo(primarySeqNoStats.getLocalCheckpoint())); + assertThat(replicaShardRouting + " global checkpoint mismatch", + seqNoStats.getGlobalCheckpoint(), equalTo(primarySeqNoStats.getGlobalCheckpoint())); + assertThat(replicaShardRouting + " max seq no mismatch", + seqNoStats.getMaxSeqNo(), equalTo(primarySeqNoStats.getMaxSeqNo())); // the local knowledge on the primary of the global checkpoint equals the global checkpoint on the shard - assertThat( - seqNoStats.getGlobalCheckpoint(), - equalTo(globalCheckpoints.get(shardStats.getShardRouting().allocationId().getId()))); + assertThat(replicaShardRouting + " global checkpoint syncs mismatch", seqNoStats.getGlobalCheckpoint(), + equalTo(syncGlobalCheckpoints.get(replicaShardRouting.allocationId().getId()))); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java index 76cfcce033b11..44b845e5c2cac 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java @@ -33,7 +33,7 @@ */ public class EqualsHashCodeTestUtils { - private static Object[] someObjects = new Object[] { "some string", new Integer(1), new Double(1.0) }; + private static Object[] someObjects = new Object[] { "some string", Integer.valueOf(1), Double.valueOf(1.0) }; /** * A function that makes a copy of its input argument diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 72d90a850caa9..4c813372fae31 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1173,7 +1173,11 @@ public void assertConsistentHistoryBetweenTranslogAndLuceneIndex() throws IOExce IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { - IndexShardTestCase.assertConsistentHistoryBetweenTranslogAndLucene(indexShard); + try { + IndexShardTestCase.assertConsistentHistoryBetweenTranslogAndLucene(indexShard); + } catch (AlreadyClosedException ignored) { + // shard is closed + } } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java index 4c4fe8f76ad89..b9a0e4a9b1ea0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java @@ -75,27 +75,20 @@ public static Path getIndexDir( final String indexFile, final Path dataDir) throws IOException { final Version version = Version.fromString(indexName.substring("index-".length())); - if (version.before(Version.V_5_0_0_alpha1)) { - // the bwc scripts packs the indices under this path - Path src = dataDir.resolve("nodes/0/indices/" + indexName); - assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src)); - return src; - } else { - final List indexFolders = new ArrayList<>(); - try (DirectoryStream stream = Files.newDirectoryStream(dataDir.resolve("0/indices"), - (p) -> p.getFileName().toString().startsWith("extra") == 
false)) { // extra FS can break this... - for (final Path path : stream) { - indexFolders.add(path); - } + final List indexFolders = new ArrayList<>(); + try (DirectoryStream stream = Files.newDirectoryStream(dataDir.resolve("0/indices"), + (p) -> p.getFileName().toString().startsWith("extra") == false)) { // extra FS can break this... + for (final Path path : stream) { + indexFolders.add(path); } - assertThat(indexFolders.toString(), indexFolders.size(), equalTo(1)); - final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, - indexFolders.get(0)); - assertNotNull(indexMetaData); - assertThat(indexFolders.get(0).getFileName().toString(), equalTo(indexMetaData.getIndexUUID())); - assertThat(indexMetaData.getCreationVersion(), equalTo(version)); - return indexFolders.get(0); } + assertThat(indexFolders.toString(), indexFolders.size(), equalTo(1)); + final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, + indexFolders.get(0)); + assertNotNull(indexMetaData); + assertThat(indexFolders.get(0).getFileName().toString(), equalTo(indexMetaData.getIndexUUID())); + assertThat(indexMetaData.getCreationVersion(), equalTo(version)); + return indexFolders.get(0); } // randomly distribute the files from src over dests paths diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java index 82d8dbeebe6a1..6ecaae75a8ee2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java @@ -32,6 +32,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThat; /** @@ -70,8 +71,13 @@ protected void doAssert(Object actualValue, Object expectedValue) { } } - assertNotNull("field [" + getField() + "] is null", actualValue); logger.trace("assert that [{}] matches [{}] (field [{}])", actualValue, expectedValue, getField()); + if (expectedValue == null) { + assertNull("field [" + getField() + "] should be null but was [" + actualValue + "]", actualValue); + return; + } + assertNotNull("field [" + getField() + "] is null", actualValue); + if (actualValue.getClass().equals(safeClass(expectedValue)) == false) { if (actualValue instanceof Number && expectedValue instanceof Number) { //Double 1.0 is equal to Integer 1 diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java index a7d3e85d3009d..132a07d5b7f48 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java @@ -41,9 +41,9 @@ import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.SendRequestTransportException; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportInterceptor; +import org.elasticsearch.transport.TransportMessageListener; import 
org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; @@ -51,7 +51,6 @@ import org.elasticsearch.transport.TransportStats; import java.io.IOException; -import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -66,13 +65,15 @@ import static org.apache.lucene.util.LuceneTestCase.rarely; -/** A transport class that doesn't send anything but rather captures all requests for inspection from tests */ +/** + * A transport class that doesn't send anything but rather captures all requests for inspection from tests + */ public class CapturingTransport implements Transport { private volatile Map requestHandlers = Collections.emptyMap(); - final Object requestHandlerMutex = new Object(); + private final Object requestHandlerMutex = new Object(); private final ResponseHandlers responseHandlers = new ResponseHandlers(); - private TransportConnectionListener listener; + private TransportMessageListener listener; public static class CapturedRequest { public final DiscoveryNode node; @@ -80,7 +81,7 @@ public static class CapturedRequest { public final String action; public final TransportRequest request; - public CapturedRequest(DiscoveryNode node, long requestId, String action, TransportRequest request) { + CapturedRequest(DiscoveryNode node, long requestId, String action, TransportRequest request) { this.node = node; this.requestId = requestId; this.action = action; @@ -96,41 +97,15 @@ public TransportService createCapturingTransportService(Settings settings, Threa @Nullable ClusterSettings clusterSettings, Set taskHeaders) { StubbableConnectionManager connectionManager = new StubbableConnectionManager(new ConnectionManager(settings, this, threadPool), settings, this, threadPool); - connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> true); - connectionManager.setDefaultConnectBehavior((cm, discoveryNode) -> new Connection() { - @Override - public DiscoveryNode getNode() { - return discoveryNode; - } - - @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws TransportException { - requests.put(requestId, Tuple.tuple(discoveryNode, action)); - capturedRequests.add(new CapturedRequest(discoveryNode, requestId, action, request)); - } - - @Override - public void addCloseListener(ActionListener listener) { - - } - - @Override - public boolean isClosed() { - return false; - } - - @Override - public void close() { - - } - }); + connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> nodeConnected(discoveryNode)); + connectionManager.setDefaultConnectBehavior((cm, discoveryNode) -> openConnection(discoveryNode, null)); return new TransportService(settings, this, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, connectionManager); - } - /** returns all requests captured so far. Doesn't clear the captured request list. See {@link #clear()} */ + /** + * returns all requests captured so far. Doesn't clear the captured request list. 
See {@link #clear()} + */ public CapturedRequest[] capturedRequests() { return capturedRequests.toArray(new CapturedRequest[0]); } @@ -178,12 +153,16 @@ public Map> getCapturedRequestsByTargetNodeAndClea return groupRequestsByTargetNode(requests); } - /** clears captured requests */ + /** + * clears captured requests + */ public void clear() { capturedRequests.clear(); } - /** simulate a response for the given requestId */ + /** + * simulate a response for the given requestId + */ public void handleResponse(final long requestId, final TransportResponse response) { responseHandlers.onResponseReceived(requestId, listener).handleResponse(response); } @@ -194,7 +173,7 @@ public void handleResponse(final long requestId, final TransportResponse respons * * @param requestId the id corresponding to the captured send * request - * @param t the failure to wrap + * @param t the failure to wrap */ public void handleLocalError(final long requestId, final Throwable t) { Tuple request = requests.get(requestId); @@ -208,7 +187,7 @@ public void handleLocalError(final long requestId, final Throwable t) { * * @param requestId the id corresponding to the captured send * request - * @param t the failure to wrap + * @param t the failure to wrap */ public void handleRemoteError(final long requestId, final Throwable t) { final RemoteTransportException remoteException; @@ -234,7 +213,7 @@ public void handleRemoteError(final long requestId, final Throwable t) { * * @param requestId the id corresponding to the captured send * request - * @param e the failure + * @param e the failure */ public void handleError(final long requestId, final TransportException e) { responseHandlers.onResponseReceived(requestId, listener).handleException(e); @@ -251,13 +230,11 @@ public DiscoveryNode getNode() { @Override public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws TransportException { - requests.put(requestId, Tuple.tuple(node, action)); - capturedRequests.add(new CapturedRequest(node, requestId, action, request)); + onSendRequest(requestId, action, request, node); } @Override public void addCloseListener(ActionListener listener) { - } @Override @@ -267,11 +244,19 @@ public boolean isClosed() { @Override public void close() { - } }; } + protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) { + requests.put(requestId, Tuple.tuple(node, action)); + capturedRequests.add(new CapturedRequest(node, requestId, action, request)); + } + + protected boolean nodeConnected(DiscoveryNode discoveryNode) { + return true; + } + @Override public TransportStats getStats() { throw new UnsupportedOperationException(); @@ -288,7 +273,7 @@ public Map profileBoundAddresses() { } @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { + public TransportAddress[] addressesFromString(String address, int perAddressLimit) { return new TransportAddress[0]; } @@ -299,22 +284,23 @@ public Lifecycle.State lifecycleState() { @Override public void addLifecycleListener(LifecycleListener listener) { - } @Override public void removeLifecycleListener(LifecycleListener listener) { - } @Override - public void start() {} + public void start() { + } @Override - public void stop() {} + public void stop() { + } @Override - public void close() {} + public void close() { + } @Override public List getLocalAddresses() { @@ -330,6 +316,7 @@ public void registerRequestHandler(RequestHan 
requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); } } + @Override public ResponseHandlers getResponseHandlers() { return responseHandlers; @@ -341,7 +328,7 @@ public RequestHandlerRegistry getRequestHandler(String action) { } @Override - public void addConnectionListener(TransportConnectionListener listener) { + public void addMessageListener(TransportMessageListener listener) { if (this.listener != null) { throw new IllegalStateException("listener already set"); } @@ -349,11 +336,12 @@ public void addConnectionListener(TransportConnectionListener listener) { } @Override - public boolean removeConnectionListener(TransportConnectionListener listener) { + public boolean removeMessageListener(TransportMessageListener listener) { if (listener == this.listener) { this.listener = null; return true; } return false; } + } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 15ab06d651e92..d6c4f30a885d5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -95,6 +95,12 @@ public List> getSettings() { public static MockTransportService createNewService(Settings settings, Version version, ThreadPool threadPool, @Nullable ClusterSettings clusterSettings) { + MockTcpTransport mockTcpTransport = newMockTransport(settings, version, threadPool); + return createNewService(settings, mockTcpTransport, version, threadPool, clusterSettings, + Collections.emptySet()); + } + + public static MockTcpTransport newMockTransport(Settings settings, Version version, ThreadPool threadPool) { // some tests use MockTransportService to do network based testing. Yet, we run tests in multiple JVMs that means // concurrent tests could claim port that another JVM just released and if that test tries to simulate a disconnect it might // be smart enough to re-connect depending on what is tested. 
To reduce the risk, since this is very hard to debug we use @@ -102,9 +108,8 @@ public static MockTransportService createNewService(Settings settings, Version v int basePort = 10300 + (JVM_ORDINAL * 100); // use a non-default port otherwise some cluster in this JVM might reuse a port settings = Settings.builder().put(TcpTransport.PORT.getKey(), basePort + "-" + (basePort + 100)).put(settings).build(); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); - final Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, + return new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); - return createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); } public static MockTransportService createNewService(Settings settings, Transport transport, Version version, ThreadPool threadPool, diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java index 486ccc805d055..012369feb839f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java @@ -120,8 +120,8 @@ public void disconnectFromNode(DiscoveryNode node) { } @Override - public int connectedNodeCount() { - return delegate.connectedNodeCount(); + public int size() { + return delegate.size(); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java index 5a0dd3b7f6d53..d35fe609c0855 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java @@ -29,8 +29,8 @@ import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportMessageListener; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportStats; @@ -41,7 +41,7 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -public class StubbableTransport implements Transport { +public final class StubbableTransport implements Transport { private final ConcurrentHashMap sendBehaviors = new ConcurrentHashMap<>(); private final ConcurrentHashMap connectBehaviors = new ConcurrentHashMap<>(); @@ -60,6 +60,12 @@ boolean setDefaultSendBehavior(SendRequestBehavior sendBehavior) { return prior == null; } + public boolean setDefaultConnectBehavior(OpenConnectionBehavior openConnectionBehavior) { + OpenConnectionBehavior prior = this.defaultConnectBehavior; + this.defaultConnectBehavior = openConnectionBehavior; + return prior == null; + } + boolean addSendBehavior(TransportAddress transportAddress, SendRequestBehavior sendBehavior) { return sendBehaviors.put(transportAddress, sendBehavior) == null; } @@ -86,13 
+92,13 @@ Transport getDelegate() { } @Override - public void addConnectionListener(TransportConnectionListener listener) { - delegate.addConnectionListener(listener); + public void addMessageListener(TransportMessageListener listener) { + delegate.addMessageListener(listener); } @Override - public boolean removeConnectionListener(TransportConnectionListener listener) { - return delegate.removeConnectionListener(listener); + public boolean removeMessageListener(TransportMessageListener listener) { + return delegate.removeMessageListener(listener); } @Override @@ -179,7 +185,7 @@ public Map profileBoundAddresses() { return delegate.profileBoundAddresses(); } - private class WrappedConnection implements Transport.Connection { + public class WrappedConnection implements Transport.Connection { private final Transport.Connection connection; @@ -234,6 +240,10 @@ public Object getCacheKey() { public void close() { connection.close(); } + + public Transport.Connection getConnection() { + return connection; + } } @FunctionalInterface diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 29997b16ba071..c485f9d45bda4 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.ClusterSettings; @@ -52,6 +53,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; @@ -766,6 +768,7 @@ public void onAfter() { public void testNotifyOnShutdown() throws Exception { final CountDownLatch latch2 = new CountDownLatch(1); + final CountDownLatch latch3 = new CountDownLatch(1); try { serviceA.registerRequestHandler("internal:foobar", StringMessageRequest::new, ThreadPool.Names.GENERIC, (request, channel, task) -> { @@ -775,6 +778,8 @@ public void testNotifyOnShutdown() throws Exception { serviceB.stop(); } catch (Exception e) { fail(e.getMessage()); + } finally { + latch3.countDown(); } }); TransportFuture foobar = serviceB.submitRequest(nodeA, "internal:foobar", @@ -786,6 +791,7 @@ public void testNotifyOnShutdown() throws Exception { } catch (TransportException ex) { } + latch3.await(); } finally { serviceB.close(); // make sure we are fully closed here otherwise we might run into assertions down the road serviceA.disconnectFromNode(nodeB); @@ -2642,15 +2648,22 @@ public void testProfilesIncludesDefault() { public void testChannelCloseWhileConnecting() { try (MockTransportService service = build(Settings.builder().put("name", "close").build(), version0, null, true)) { - service.transport.addConnectionListener(new TransportConnectionListener() { + AtomicBoolean connectionClosedListenerCalled = new AtomicBoolean(false); + 
service.addConnectionListener(new TransportConnectionListener() { @Override public void onConnectionOpened(final Transport.Connection connection) { + closeConnectionChannel(connection); try { - closeConnectionChannel(service.getOriginalTransport(), connection); - } catch (final IOException e) { + assertBusy(() -> assertTrue(connection.isClosed())); + } catch (Exception e) { throw new AssertionError(e); } } + + @Override + public void onConnectionClosed(Transport.Connection connection) { + connectionClosedListenerCalled.set(true); + } }); final ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); builder.addConnections(1, @@ -2662,10 +2675,15 @@ public void onConnectionOpened(final Transport.Connection connection) { final ConnectTransportException e = expectThrows(ConnectTransportException.class, () -> service.openConnection(nodeA, builder.build())); assertThat(e, hasToString(containsString(("a channel closed while connecting")))); + assertTrue(connectionClosedListenerCalled.get()); } } - protected abstract void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException; + private void closeConnectionChannel(Transport.Connection connection) { + StubbableTransport.WrappedConnection wrappedConnection = (StubbableTransport.WrappedConnection) connection; + TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) wrappedConnection.getConnection(); + CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true); + } @SuppressForbidden(reason = "need local ephemeral port") private InetSocketAddress getLocalEphemeral() throws UnknownHostException { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index e6d80ac24d88e..996508bdb887a 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport; import org.elasticsearch.cli.SuppressForbidden; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -162,7 +163,8 @@ private void readMessage(MockChannel mockChannel, StreamInput input) throws IOEx @Override @SuppressForbidden(reason = "real socket for mocking remote connections") - protected MockChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + protected MockChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException { + InetSocketAddress address = node.getAddress().address(); final MockSocket socket = new MockSocket(); final MockChannel channel = new MockChannel(socket, address, "none"); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index fbe61db6ee721..19543cfdcbb15 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import 
org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -85,7 +86,8 @@ protected MockServerChannel bind(String name, InetSocketAddress address) throws } @Override - protected MockSocketChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + protected MockSocketChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException { + InetSocketAddress address = node.getAddress().address(); MockSocketChannel channel = nioGroup.openChannel(address, clientChannelFactory); channel.addConnectListener(ActionListener.toBiConsumer(connectListener)); return channel; diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index 5da8601a9f340..500cff893cb1f 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -152,7 +152,7 @@ public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exceptio parser = createParser(YamlXContent.yamlXContent, "\"First test section\": \n" + " - skip:\n" + - " version: \"5.0.0 - 5.2.0\"\n" + + " version: \"6.0.0 - 6.2.0\"\n" + " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" + " - do :\n" + " catch: missing\n" + @@ -167,9 +167,9 @@ public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exceptio assertThat(testSection, notNullValue()); assertThat(testSection.getName(), equalTo("First test section")); assertThat(testSection.getSkipSection(), notNullValue()); - assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0)); + assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); assertThat(testSection.getSkipSection().getUpperVersion(), - equalTo(Version.V_5_2_0)); + equalTo(Version.V_6_2_0)); assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(testSection.getExecutableSections().size(), equalTo(2)); DoSection doSection = (DoSection)testSection.getExecutableSections().get(0); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index 4c97eb453610e..71814593ad487 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -66,10 +66,10 @@ public void testParseTestSetupTeardownAndSections() throws Exception { " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" + "\n" + "---\n" + - "\"Get type mapping - pre 5.0\":\n" + + "\"Get type mapping - pre 6.0\":\n" + "\n" + " - skip:\n" + - " version: \"5.0.0 - \"\n" + + " version: \"6.0.0 - \"\n" + " reason: \"for newer versions the index name is always returned\"\n" + "\n" + " - do:\n" + @@ -97,7 +97,7 @@ public void testParseTestSetupTeardownAndSections() throws Exception { } else { assertThat(restTestSuite.getSetupSection().isEmpty(), 
equalTo(true)); } - + assertThat(restTestSuite.getTeardownSection(), notNullValue()); if (includeTeardown) { assertThat(restTestSuite.getTeardownSection().isEmpty(), equalTo(false)); @@ -131,12 +131,12 @@ public void testParseTestSetupTeardownAndSections() throws Exception { assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace")); assertThat(restTestSuite.getTestSections().get(1).getName(), - equalTo("Get type mapping - pre 5.0")); + equalTo("Get type mapping - pre 6.0")); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false)); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getReason(), equalTo("for newer versions the index name is always returned")); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), - equalTo(Version.V_5_0_0)); + equalTo(Version.V_6_0_0)); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getUpperVersion(), equalTo(Version.CURRENT)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class)); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/MatchAssertionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/MatchAssertionTests.java new file mode 100644 index 0000000000000..2bd7234744121 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/MatchAssertionTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.test.rest.yaml.section; + +import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.test.ESTestCase; + +public class MatchAssertionTests extends ESTestCase { + + public void testNull() { + XContentLocation xContentLocation = new XContentLocation(0, 0); + { + MatchAssertion matchAssertion = new MatchAssertion(xContentLocation, "field", null); + matchAssertion.doAssert(null, null); + expectThrows(AssertionError.class, () -> matchAssertion.doAssert("non-null", null)); + } + { + MatchAssertion matchAssertion = new MatchAssertion(xContentLocation, "field", "non-null"); + expectThrows(AssertionError.class, () -> matchAssertion.doAssert(null, "non-null")); + } + { + MatchAssertion matchAssertion = new MatchAssertion(xContentLocation, "field", "/exp/"); + expectThrows(AssertionError.class, () -> matchAssertion.doAssert(null, "/exp/")); + } + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java index cb9ab009b2594..e883e8e062af2 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java @@ -53,7 +53,7 @@ public void testParseSetupSection() throws Exception { public void testParseSetupAndSkipSectionNoSkip() throws Exception { parser = createParser(YamlXContent.yamlXContent, " - skip:\n" + - " version: \"5.0.0 - 5.3.0\"\n" + + " version: \"6.0.0 - 6.3.0\"\n" + " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" + " - do:\n" + " index1:\n" + @@ -74,9 +74,9 @@ public void testParseSetupAndSkipSectionNoSkip() throws Exception { assertThat(setupSection, notNullValue()); assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false)); assertThat(setupSection.getSkipSection(), notNullValue()); - assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0)); + assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); assertThat(setupSection.getSkipSection().getUpperVersion(), - equalTo(Version.V_5_3_0)); + equalTo(Version.V_6_3_0)); assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(setupSection.getDoSections().size(), equalTo(2)); assertThat(setupSection.getDoSections().get(0).getApiCallSection().getApi(), equalTo("index1")); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java index 3ab9583335e7c..e5e466a82cc18 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java @@ -34,17 +34,17 @@ public class SkipSectionTests extends AbstractClientYamlTestFragmentParserTestCase { public void testSkip() { - SkipSection section = new SkipSection("5.0.0 - 5.1.0", + SkipSection section = new SkipSection("6.0.0 - 6.1.0", randomBoolean() ? Collections.emptyList() : Collections.singletonList("warnings"), "foobar"); assertFalse(section.skip(Version.CURRENT)); - assertTrue(section.skip(Version.V_5_0_0)); - section = new SkipSection(randomBoolean() ? 
null : "5.0.0 - 5.1.0", + assertTrue(section.skip(Version.V_6_0_0)); + section = new SkipSection(randomBoolean() ? null : "6.0.0 - 6.1.0", Collections.singletonList("boom"), "foobar"); assertTrue(section.skip(Version.CURRENT)); } public void testMessage() { - SkipSection section = new SkipSection("5.0.0 - 5.1.0", + SkipSection section = new SkipSection("6.0.0 - 6.1.0", Collections.singletonList("warnings"), "foobar"); assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR")); section = new SkipSection(null, Collections.singletonList("warnings"), "foobar"); @@ -55,14 +55,14 @@ public void testMessage() { public void testParseSkipSectionVersionNoFeature() throws Exception { parser = createParser(YamlXContent.yamlXContent, - "version: \" - 5.1.1\"\n" + + "version: \" - 6.1.1\"\n" + "reason: Delete ignores the parent param" ); SkipSection skipSection = SkipSection.parse(parser); assertThat(skipSection, notNullValue()); assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion())); - assertThat(skipSection.getUpperVersion(), equalTo(Version.V_5_1_1)); + assertThat(skipSection.getUpperVersion(), equalTo(Version.V_6_1_1)); assertThat(skipSection.getFeatures().size(), equalTo(0)); assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param")); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java index 15ca1ec0096e3..07afa9f33b5b1 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java @@ -56,7 +56,7 @@ public void testParseTeardownSection() throws Exception { public void testParseWithSkip() throws Exception { parser = createParser(YamlXContent.yamlXContent, " - skip:\n" + - " version: \"5.0.0 - 5.3.0\"\n" + + " version: \"6.0.0 - 6.3.0\"\n" + " reason: \"there is a reason\"\n" + " - do:\n" + " delete:\n" + @@ -75,8 +75,8 @@ public void testParseWithSkip() throws Exception { TeardownSection section = TeardownSection.parse(parser); assertThat(section, notNullValue()); assertThat(section.getSkipSection().isEmpty(), equalTo(false)); - assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0)); - assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_5_3_0)); + assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); + assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_6_3_0)); assertThat(section.getSkipSection().getReason(), equalTo("there is a reason")); assertThat(section.getDoSections().size(), equalTo(2)); assertThat(section.getDoSections().get(0).getApiCallSection().getApi(), equalTo("delete")); diff --git a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java index 6d1e5116474ff..42658b1d9a60f 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import 
org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -60,13 +59,4 @@ protected Version executeHandshake(DiscoveryNode node, TcpChannel mockChannel, T public int channelsPerNodeConnection() { return 1; } - - @Override - protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException { - final MockTcpTransport t = (MockTcpTransport) transport; - final TcpTransport.NodeChannels channels = - (TcpTransport.NodeChannels) connection; - CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true); - } - } diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java index 3a78f366bd87f..8b3e7dce367d8 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -99,12 +98,6 @@ protected int channelsPerNodeConnection() { return 3; } - @Override - protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException { - TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection; - CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true); - } - public void testConnectException() throws UnknownHostException { try { serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), diff --git a/test/logger-usage/build.gradle b/test/logger-usage/build.gradle index c16dab6a625c8..6ab975fd42eba 100644 --- a/test/logger-usage/build.gradle +++ b/test/logger-usage/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -29,7 +27,7 @@ loggerUsageCheck.enabled = false forbiddenApisMain.enabled = true // disabled by parent project forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] // does not depend on core, only jdk signatures + replaceSignatureFiles 'jdk-signatures' // does not depend on core, only jdk signatures } jarHell.enabled = true // disabled by parent project @@ -44,14 +42,4 @@ thirdPartyAudit.excludes = [ 'org.osgi.framework.SynchronousBundleListener', 'org.osgi.framework.wiring.BundleWire', 'org.osgi.framework.wiring.BundleWiring' -] - -if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { - // Used by Log4J 2.11.1 - thirdPartyAudit.excludes += [ - 'java.io.ObjectInputFilter', - 'java.io.ObjectInputFilter$Config', - 'java.io.ObjectInputFilter$FilterInfo', - 'java.io.ObjectInputFilter$Status' - ] -} \ No newline at end of file +] \ No newline at end of file diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 70f6061985a6a..f027493b0abed 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -14,23 +14,13 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/security/authorization/run-as-privilege.asciidoc', 'en/security/ccs-clients-integrations/http.asciidoc', 'en/security/authorization/custom-roles-provider.asciidoc', - 'en/rest-api/ml/delete-snapshot.asciidoc', - 'en/rest-api/ml/get-bucket.asciidoc', - 'en/rest-api/ml/get-job-stats.asciidoc', - 'en/rest-api/ml/get-overall-buckets.asciidoc', - 'en/rest-api/ml/get-category.asciidoc', - 'en/rest-api/ml/get-record.asciidoc', - 'en/rest-api/ml/get-influencer.asciidoc', - 'en/rest-api/ml/get-snapshot.asciidoc', - 'en/rest-api/ml/post-data.asciidoc', - 'en/rest-api/ml/revert-snapshot.asciidoc', - 'en/rest-api/ml/update-snapshot.asciidoc', 'en/rest-api/watcher/stats.asciidoc', 'en/watcher/example-watches/watching-time-series-data.asciidoc', ] dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') } @@ -309,7 +299,7 @@ setups['farequote_datafeed'] = setups['farequote_job'] + ''' "job_id":"farequote", "indexes":"farequote" } -''' +''' setups['ml_filter_safe_domains'] = ''' - do: xpack.ml.put_filter: @@ -684,9 +674,8 @@ setups['sensor_prefab_data'] = ''' page_size: 1000 groups: date_histogram: - delay: "7d" field: "timestamp" - interval: "1h" + interval: "7d" time_zone: "UTC" terms: fields: @@ -722,3 +711,49 @@ setups['sensor_prefab_data'] = ''' {"node.terms.value":"c","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.0,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516294800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} ''' +setups['admin_role'] = ''' + - do: + xpack.security.put_role: + name: "my_admin_role" + body: > + { + "cluster": ["all"], + "indices": [ + {"names": ["index1", "index2" ], 
"privileges": ["all"], "field_security" : {"grant" : [ "title", "body" ]}} + ], + "run_as": [ "other_user" ], + "metadata" : {"version": 1} + } +''' +setups['jacknich_user'] = ''' + - do: + xpack.security.put_user: + username: "jacknich" + body: > + { + "password" : "test-password", + "roles" : [ "admin", "other_role1" ], + "full_name" : "Jack Nicholson", + "email" : "jacknich@example.com", + "metadata" : { "intelligence" : 7 } + } +''' +setups['app0102_privileges'] = ''' + - do: + xpack.security.put_privileges: + body: > + { + "myapp": { + "read": { + "application": "myapp", + "name": "read", + "actions": [ + "data:read/*", + "action:login" ], + "metadata": { + "description": "Read access to myapp" + } + } + } + } +''' diff --git a/x-pack/docs/en/ml/api-quickref.asciidoc b/x-pack/docs/en/ml/api-quickref.asciidoc deleted file mode 100644 index be74167862e15..0000000000000 --- a/x-pack/docs/en/ml/api-quickref.asciidoc +++ /dev/null @@ -1,102 +0,0 @@ -[role="xpack"] -[[ml-api-quickref]] -== API quick reference - -All {ml} endpoints have the following base: - -[source,js] ----- -/_xpack/ml/ ----- -// NOTCONSOLE - -The main {ml} resources can be accessed with a variety of endpoints: - -* <>: Create and manage {ml} jobs -* <>: Select data from {es} to be analyzed -* <>: Access the results of a {ml} job -* <>: Manage model snapshots -//* <>: Validate subsections of job configurations - -[float] -[[ml-api-jobs]] -=== /anomaly_detectors/ - -* {ref}/ml-put-job.html[PUT /anomaly_detectors/+++]: Create a job -* {ref}/ml-open-job.html[POST /anomaly_detectors//_open]: Open a job -* {ref}/ml-post-data.html[POST /anomaly_detectors//_data]: Send data to a job -* {ref}/ml-get-job.html[GET /anomaly_detectors]: List jobs -* {ref}/ml-get-job.html[GET /anomaly_detectors/+++]: Get job details -* {ref}/ml-get-job-stats.html[GET /anomaly_detectors//_stats]: Get job statistics -* {ref}/ml-update-job.html[POST /anomaly_detectors//_update]: Update certain properties of the job configuration -* {ref}/ml-flush-job.html[POST anomaly_detectors//_flush]: Force a job to analyze buffered data -* {ref}/ml-forecast.html[POST anomaly_detectors//_forecast]: Forecast future job behavior -* {ref}/ml-close-job.html[POST /anomaly_detectors//_close]: Close a job -* {ref}/ml-delete-job.html[DELETE /anomaly_detectors/+++]: Delete a job - -[float] -[[ml-api-calendars]] -=== /calendars/ - -* {ref}/ml-put-calendar.html[PUT /calendars/+++]: Create a calendar -* {ref}/ml-post-calendar-event.html[POST /calendars/+++/events]: Add a scheduled event to a calendar -* {ref}/ml-put-calendar-job.html[PUT /calendars/+++/jobs/+++]: Associate a job with a calendar -* {ref}/ml-get-calendar.html[GET /calendars/+++]: Get calendar details -* {ref}/ml-get-calendar-event.html[GET /calendars/+++/events]: Get scheduled event details -* {ref}/ml-delete-calendar-event.html[DELETE /calendars/+++/events/+++]: Remove a scheduled event from a calendar -* {ref}/ml-delete-calendar-job.html[DELETE /calendars/+++/jobs/+++]: Disassociate a job from a calendar -* {ref}/ml-delete-calendar.html[DELETE /calendars/+++]: Delete a calendar - -[float] -[[ml-api-filters]] -=== /filters/ - -* {ref}/ml-put-filter.html[PUT /filters/+++]: Create a filter -* {ref}/ml-update-filter.html[POST /filters/+++/_update]: Update a filter -* {ref}/ml-get-filter.html[GET /filters/+++]: List filters -* {ref}/ml-delete-filter.html[DELETE /filter/+++]: Delete a filter - -[float] -[[ml-api-datafeeds]] -=== /datafeeds/ - -* {ref}/ml-put-datafeed.html[PUT /datafeeds/+++]: Create a {dfeed} -* 
{ref}/ml-start-datafeed.html[POST /datafeeds//_start]: Start a {dfeed} -* {ref}/ml-get-datafeed.html[GET /datafeeds]: List {dfeeds} -* {ref}/ml-get-datafeed.html[GET /datafeeds/+++]: Get {dfeed} details -* {ref}/ml-get-datafeed-stats.html[GET /datafeeds//_stats]: Get statistical information for {dfeeds} -* {ref}/ml-preview-datafeed.html[GET /datafeeds//_preview]: Get a preview of a {dfeed} -* {ref}/ml-update-datafeed.html[POST /datafeeds//_update]: Update certain settings for a {dfeed} -* {ref}/ml-stop-datafeed.html[POST /datafeeds//_stop]: Stop a {dfeed} -* {ref}/ml-delete-datafeed.html[DELETE /datafeeds/+++]: Delete {dfeed} - -[float] -[[ml-api-results]] -=== /results/ - -* {ref}/ml-get-bucket.html[GET /results/buckets]: List the buckets in the results -* {ref}/ml-get-bucket.html[GET /results/buckets/+++]: Get bucket details -* {ref}/ml-get-overall-buckets.html[GET /results/overall_buckets]: Get overall bucket results for multiple jobs -* {ref}/ml-get-category.html[GET /results/categories]: List the categories in the results -* {ref}/ml-get-category.html[GET /results/categories/+++]: Get category details -* {ref}/ml-get-influencer.html[GET /results/influencers]: Get influencer details -* {ref}/ml-get-record.html[GET /results/records]: Get records from the results - -[float] -[[ml-api-snapshots]] -=== /model_snapshots/ - -* {ref}/ml-get-snapshot.html[GET /model_snapshots]: List model snapshots -* {ref}/ml-get-snapshot.html[GET /model_snapshots/+++]: Get model snapshot details -* {ref}/ml-revert-snapshot.html[POST /model_snapshots//_revert]: Revert a model snapshot -* {ref}/ml-update-snapshot.html[POST /model_snapshots//_update]: Update certain settings for a model snapshot -* {ref}/ml-delete-snapshot.html[DELETE /model_snapshots/+++]: Delete a model snapshot - -//// -[float] -[[ml-api-validate]] -=== /validate/ - -* {ref}/ml-valid-detector.html[POST /anomaly_detectors/_validate/detector]: Validate a detector -* {ref}/ml-valid-job.html[POST /anomaly_detectors/_validate]: Validate a job -//// diff --git a/x-pack/docs/en/rest-api/defs.asciidoc b/x-pack/docs/en/rest-api/defs.asciidoc deleted file mode 100644 index 349ce343c7ae9..0000000000000 --- a/x-pack/docs/en/rest-api/defs.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -[role="xpack"] -[[ml-api-definitions]] -== Definitions - -These resource definitions are used in {ml} APIs and in {kib} advanced -job configuration options. - -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> - -[role="xpack"] -include::ml/calendarresource.asciidoc[] -[role="xpack"] -include::ml/datafeedresource.asciidoc[] -[role="xpack"] -include::ml/filterresource.asciidoc[] -[role="xpack"] -include::ml/jobresource.asciidoc[] -[role="xpack"] -include::ml/jobcounts.asciidoc[] -[role="xpack"] -include::ml/snapshotresource.asciidoc[] -[role="xpack"] -include::ml/resultsresource.asciidoc[] -[role="xpack"] -include::ml/eventresource.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index 227e343192a50..3ba582d5d7856 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -2,21 +2,89 @@ [[security-api]] == Security APIs +You can use the following APIs to perform {security} activities. 
+ * <> * <> -* <> -* <> -* <> +* <> * <> -* <> -* <> +[float] +[[security-api-app-privileges]] +=== Application privileges + +You can use the following APIs to add, update, retrieve, and remove application +privileges: + +* <> +* <> +* <> + +[float] +[[security-role-mapping-apis]] +=== Role mappings + +You can use the following APIs to add, remove, update, and retrieve role mappings: + +* <> +* <> +* <> + +[float] +[[security-role-apis]] +=== Roles + +You can use the following APIs to add, remove, update, and retrieve roles in the native realm: + +* <> +* <> +* <> +* <> + +[float] +[[security-token-apis]] +=== Tokens + +You can use the following APIs to create and invalidate bearer tokens for access +without requiring basic authentication: + +* <> +* <> + +[float] +[[security-user-apis]] +=== Users + +You can use the following APIs to add, remove, update, or retrieve users in the +native realm: + +* <> +* <> +* <> +* <> +* <> +* <> + + +include::security/put-app-privileges.asciidoc[] include::security/authenticate.asciidoc[] include::security/change-password.asciidoc[] include::security/clear-cache.asciidoc[] -include::security/privileges.asciidoc[] -include::security/roles.asciidoc[] -include::security/role-mapping.asciidoc[] +include::security/create-role-mappings.asciidoc[] +include::security/clear-roles-cache.asciidoc[] +include::security/create-roles.asciidoc[] +include::security/create-users.asciidoc[] +include::security/delete-app-privileges.asciidoc[] +include::security/delete-role-mappings.asciidoc[] +include::security/delete-roles.asciidoc[] +include::security/delete-tokens.asciidoc[] +include::security/delete-users.asciidoc[] +include::security/disable-users.asciidoc[] +include::security/enable-users.asciidoc[] +include::security/get-app-privileges.asciidoc[] +include::security/get-role-mappings.asciidoc[] +include::security/get-roles.asciidoc[] +include::security/get-tokens.asciidoc[] +include::security/get-users.asciidoc[] +include::security/has-privileges.asciidoc[] include::security/ssl.asciidoc[] -include::security/tokens.asciidoc[] -include::security/users.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/change-password.asciidoc b/x-pack/docs/en/rest-api/security/change-password.asciidoc index 7dee98480e72c..6e6e8cf7375e4 100644 --- a/x-pack/docs/en/rest-api/security/change-password.asciidoc +++ b/x-pack/docs/en/rest-api/security/change-password.asciidoc @@ -1,9 +1,8 @@ [role="xpack"] [[security-api-change-password]] -=== Change Password API +=== Change passwords API -The Change Password API enables you to submit a request to change the password -of a user. +Changes the passwords of users in the native realm. ==== Request @@ -12,6 +11,15 @@ of a user. `POST _xpack/security/user//_password` +==== Description + +You can use the <> to update everything +but a user's `username` and `password`. This API changes a user's password. + +For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + + ==== Path Parameters `username`:: @@ -33,16 +41,17 @@ privilege can change passwords of other users. 
==== Examples -The following example updates the password for the `elastic` user: +The following example updates the password for the `jacknich` user: [source,js] -------------------------------------------------- -POST _xpack/security/user/elastic/_password +POST /_xpack/security/user/jacknich/_password { - "password": "x-pack-test-password" + "password" : "s3cr3t" } -------------------------------------------------- // CONSOLE +// TEST[setup:jacknich_user] A successful call returns an empty JSON structure. diff --git a/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc b/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc new file mode 100644 index 0000000000000..591d7eb2d11e4 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc @@ -0,0 +1,39 @@ +[role="xpack"] +[[security-api-clear-role-cache]] +=== Clear roles cache API + +Evicts roles from the native role cache. + +==== Request + +`POST /_xpack/security/role//_clear_cache` + +==== Description + +For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + +==== Path Parameters + +`name`:: + (string) The name of the role. + + +//==== Request Body + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster +privilege. + + +==== Examples + +The clear roles cache API evicts roles from the native role cache. For example, +to clear the cache for `my_admin_role`: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/role/my_admin_role/_clear_cache +-------------------------------------------------- +// CONSOLE diff --git a/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc new file mode 100644 index 0000000000000..87dedbba4f7cf --- /dev/null +++ b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc @@ -0,0 +1,239 @@ +[role="xpack"] +[[security-api-put-role-mapping]] +=== Create or update role mappings API + +Creates and updates role mappings. + +==== Request + +`POST /_xpack/security/role_mapping/` + + +`PUT /_xpack/security/role_mapping/` + + +==== Description + +Role mappings define which roles are assigned to each user. Each mapping has +_rules_ that identify users and a list of _roles_ that are +granted to those users. + +NOTE: This API does not create roles. Rather, it maps users to existing roles. +Roles can be created by using <> or +{stack-ov}/defining-roles.html#roles-management-file[roles files]. + +For more information, see +{stack-ov}/mapping-roles.html[Mapping users and groups to roles]. + + +==== Path Parameters + +`name`:: + (string) The distinct name that identifies the role mapping. The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. + + +==== Request Body + +The following parameters can be specified in the body of a PUT or POST request +and pertain to adding a role mapping: + +`enabled` (required):: +(boolean) Mappings that have `enabled` set to `false` are ignored when role +mapping is performed. + +`metadata`:: +(object) Additional metadata that helps define which roles are assigned to each +user. Within the `metadata` object, keys beginning with `_` are reserved for +system usage. + +`roles` (required):: +(list) A list of roles that are granted to the users that match the role mapping +rules. + +`rules` (required):: +(object) The rules that determine which users should be matched by the mapping. 
+A rule is a logical condition that is expressed by using a JSON DSL. See +<>. + + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +The following example assigns the "user" role to all users: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping1 +{ + "roles": [ "user"], + "enabled": true, <1> + "rules": { + "field" : { "username" : "*" } + }, + "metadata" : { <2> + "version" : 1 + } +} +------------------------------------------------------------ +// CONSOLE +<1> Mappings that have `enabled` set to `false` are ignored when role mapping + is performed. +<2> Metadata is optional. + +A successful call returns a JSON structure that shows whether the mapping has +been created or updated. + +[source,js] +-------------------------------------------------- +{ + "role_mapping" : { + "created" : true <1> + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> When an existing mapping is updated, `created` is set to false. + +The following example assigns the "user" and "admin" roles to specific users: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/role_mapping/mapping2 +{ + "roles": [ "user", "admin" ], + "enabled": true, + "rules": { + "field" : { "username" : [ "esadmin01", "esadmin02" ] } + } +} +-------------------------------------------------- +// CONSOLE + +The following example matches any user where either the username is `esadmin` +or the user is in the `cn=admin,dc=example,dc=com` group: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping3 +{ + "roles": [ "superuser" ], + "enabled": true, + "rules": { + "any": [ + { + "field": { + "username": "esadmin" + } + }, + { + "field": { + "groups": "cn=admins,dc=example,dc=com" + } + } + ] + } +} +------------------------------------------------------------ +// CONSOLE + +The following example matches users who authenticated against a specific realm: +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping4 +{ + "roles": [ "ldap-user" ], + "enabled": true, + "rules": { + "field" : { "realm.name" : "ldap1" } + } +} +------------------------------------------------------------ +// CONSOLE + +The following example matches users within a specific LDAP sub-tree: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping5 +{ + "roles": [ "example-user" ], + "enabled": true, + "rules": { + "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } + } +} +------------------------------------------------------------ +// CONSOLE + +The following example matches users within a particular LDAP sub-tree in a +specific realm: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping6 +{ + "roles": [ "ldap-example-user" ], + "enabled": true, + "rules": { + "all": [ + { "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } }, + { "field" : { "realm.name" : "ldap1" } } + ] + } +} +------------------------------------------------------------ +// CONSOLE + +The rules can be more complex and include wildcard matching. 
For example, the +following mapping matches any user where *all* of these conditions are met: + +- the _Distinguished Name_ matches the pattern `*,ou=admin,dc=example,dc=com`, + or the username is `es-admin`, or the username is `es-system` +- the user in in the `cn=people,dc=example,dc=com` group +- the user does not have a `terminated_date` + + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping7 +{ + "roles": [ "superuser" ], + "enabled": true, + "rules": { + "all": [ + { + "any": [ + { + "field": { + "dn": "*,ou=admin,dc=example,dc=com" + } + }, + { + "field": { + "username": [ "es-admin", "es-system" ] + } + } + ] + }, + { + "field": { + "groups": "cn=people,dc=example,dc=com" + } + }, + { + "except": { + "field": { + "metadata.terminated_date": null + } + } + } + ] + } +} +------------------------------------------------------------ +// CONSOLE diff --git a/x-pack/docs/en/rest-api/security/create-roles.asciidoc b/x-pack/docs/en/rest-api/security/create-roles.asciidoc new file mode 100644 index 0000000000000..fc3c613557ef0 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/create-roles.asciidoc @@ -0,0 +1,120 @@ +[role="xpack"] +[[security-api-put-role]] +=== Create or update roles API + +Adds and updates roles in the native realm. + +==== Request + +`POST /_xpack/security/role/` + + +`PUT /_xpack/security/role/` + + +==== Description + +The role API is generally the preferred way to manage roles, rather than using +file-based role management. For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + + +==== Path Parameters + +`name`:: + (string) The name of the role. + + +==== Request Body + +The following parameters can be specified in the body of a PUT or POST request +and pertain to adding a role: + +`applications`:: (list) A list of application privilege entries. +`application` (required)::: (string) The name of the application to which this entry applies +`privileges`::: (list) A list of strings, where each element is the name of an application +privilege or action. +`resources`::: (list) A list resources to which the privileges are applied. + +`cluster`:: (list) A list of cluster privileges. These privileges define the +cluster level actions that users with this role are able to execute. + +`global`:: (object) An object defining global privileges. A global privilege is +a form of cluster privilege that is request-aware. Support for global privileges +is currently limited to the management of application privileges. +This field is optional. + +`indices`:: (list) A list of indices permissions entries. +`field_security`::: (list) The document fields that the owners of the role have +read access to. For more information, see +{stack-ov}/field-and-document-access-control.html[Setting up field and document level security]. +`names` (required)::: (list) A list of indices (or index name patterns) to which the +permissions in this entry apply. +`privileges`(required)::: (list) The index level privileges that the owners of the role +have on the specified indices. +`query`::: A search query that defines the documents the owners of the role have +read access to. A document within the specified indices must match this query in +order for it to be accessible by the owners of the role. + +`metadata`:: (object) Optional meta-data. Within the `metadata` object, keys +that begin with `_` are reserved for system usage. 
+ +`run_as`:: (list) A list of users that the owners of this role can impersonate. +For more information, see +{stack-ov}/run-as-privilege.html[Submitting requests on behalf of other users]. + +For more information, see {stack-ov}/defining-roles.html[Defining roles]. + + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster +privilege. + + +==== Examples + +The following example adds a role called `my_admin_role`: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/role/my_admin_role +{ + "cluster": ["all"], + "indices": [ + { + "names": [ "index1", "index2" ], + "privileges": ["all"], + "field_security" : { // optional + "grant" : [ "title", "body" ] + }, + "query": "{\"match\": {\"title\": \"foo\"}}" // optional + } + ], + "applications": [ + { + "application": "myapp", + "privileges": [ "admin", "read" ], + "resources": [ "*" ] + } + ], + "run_as": [ "other_user" ], // optional + "metadata" : { // optional + "version" : 1 + } +} +-------------------------------------------------- +// CONSOLE + +A successful call returns a JSON structure that shows whether the role has been +created or updated. + +[source,js] +-------------------------------------------------- +{ + "role": { + "created": true <1> + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> When an existing role is updated, `created` is set to false. diff --git a/x-pack/docs/en/rest-api/security/create-users.asciidoc b/x-pack/docs/en/rest-api/security/create-users.asciidoc new file mode 100644 index 0000000000000..91171b0e57eb4 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/create-users.asciidoc @@ -0,0 +1,107 @@ +[role="xpack"] +[[security-api-put-user]] +=== Create or update users API + +Adds and updates users in the native realm. These users are commonly referred +to as _native users_. + + +==== Request + +`POST /_xpack/security/user/` + + +`PUT /_xpack/security/user/` + + +==== Description + +When updating a user, you can update everything but its `username` and `password`. +To change a user's password, use the +<>. + +For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + +==== Path Parameters + +`username` (required):: + (string) An identifier for the user. ++ +-- +[[username-validation]] +NOTE: Usernames must be at least 1 and no more than 1024 characters. They can +contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, punctuation, and +printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. Leading or trailing whitespace is not allowed. + +-- + + +==== Request Body + +The following parameters can be specified in the body of a POST or PUT request: + +`enabled`:: +(boolean) Specifies whether the user is enabled. The default value is `true`. + +`email`:: +(string) The email of the user. + +`full_name`:: +(string) The full name of the user. + +`metadata`:: +(object) Arbitrary metadata that you want to associate with the user. + +`password` (required):: +(string) The user's password. Passwords must be at least 6 characters long. + +`roles` (required):: +(list) A set of roles the user has. The roles determine the user's access +permissions. To create a user without any roles, specify an empty list: `[]`. + + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. 
+ + +==== Examples + +The following example creates a user `jacknich`: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/user/jacknich +{ + "password" : "j@rV1s", + "roles" : [ "admin", "other_role1" ], + "full_name" : "Jack Nicholson", + "email" : "jacknich@example.com", + "metadata" : { + "intelligence" : 7 + } +} +-------------------------------------------------- +// CONSOLE + +A successful call returns a JSON structure that shows whether the user has been +created or updated. + +[source,js] +-------------------------------------------------- +{ + "user": { + "created" : true <1> + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> When an existing user is updated, `created` is set to false. + +After you add a user, requests from that user can be authenticated. For example: + +[source,shell] +-------------------------------------------------- +curl -u jacknich:j@rV1s http://localhost:9200/_cluster/health +-------------------------------------------------- +// NOTCONSOLE diff --git a/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc new file mode 100644 index 0000000000000..d7f001721b1fd --- /dev/null +++ b/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc @@ -0,0 +1,59 @@ +[role="xpack"] +[[security-api-delete-privilege]] +=== Delete application privileges API + +Removes +{stack-ov}/security-privileges.html#application-privileges[application privileges]. + +==== Request + +`DELETE /_xpack/security/privilege//` + +//==== Description + +==== Path Parameters + +`application` (required):: + (string) The name of the application. Application privileges are always + associated with exactly one application. + +`privilege` (required):: + (string) The name of the privilege. + +// ==== Request Body + +==== Authorization + +To use this API, you must have either: + +- the `manage_security` cluster privilege (or a greater privilege such as `all`); _or_ +- the _"Manage Application Privileges"_ global privilege for the application being referenced + in the request + +==== Examples + +The following example deletes the `read` application privilege from the +`myapp` application: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/privilege/myapp/read +-------------------------------------------------- +// CONSOLE +// TEST[setup:app0102_privileges] + +If the role is successfully deleted, the request returns `{"found": true}`. +Otherwise, `found` is set to false. + +[source,js] +-------------------------------------------------- +{ + "myapp": { + "read": { + "found" : true + } + } +} +-------------------------------------------------- +// TESTRESPONSE + diff --git a/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc new file mode 100644 index 0000000000000..dc9bf2ba10904 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc @@ -0,0 +1,50 @@ +[role="xpack"] +[[security-api-delete-role-mapping]] +=== Delete role mappings API + +Removes role mappings. + +==== Request + +`DELETE /_xpack/security/role_mapping/` + +==== Description + +Role mappings define which roles are assigned to each user. For more information, +see {stack-ov}/mapping-roles.html[Mapping users and groups to roles]. + +==== Path Parameters + +`name`:: + (string) The distinct name that identifies the role mapping. 
The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. + +//==== Request Body + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +The following example delete a role mapping: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/role_mapping/mapping1 +-------------------------------------------------- +// CONSOLE +// TEST[setup:role_mapping] + +If the mapping is successfully deleted, the request returns `{"found": true}`. +Otherwise, `found` is set to false. + +[source,js] +-------------------------------------------------- +{ + "found" : true +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/delete-roles.asciidoc b/x-pack/docs/en/rest-api/security/delete-roles.asciidoc new file mode 100644 index 0000000000000..db42493ca0fb6 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/delete-roles.asciidoc @@ -0,0 +1,53 @@ +[role="xpack"] +[[security-api-delete-role]] +=== Delete roles API + +Removes roles in the native realm. + +==== Request + +`DELETE /_xpack/security/role/` + + +==== Description + +The Roles API is generally the preferred way to manage roles, rather than using +file-based role management. For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + + +==== Path Parameters + +`name`:: + (string) The name of the role. + +//==== Request Body + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster +privilege. + + +==== Examples + +The following example deletes a `my_admin_role` role: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/role/my_admin_role +-------------------------------------------------- +// CONSOLE +// TEST[setup:admin_role] + +If the role is successfully deleted, the request returns `{"found": true}`. +Otherwise, `found` is set to false. + +[source,js] +-------------------------------------------------- +{ + "found" : true +} +-------------------------------------------------- +// TESTRESPONSE + diff --git a/x-pack/docs/en/rest-api/security/delete-tokens.asciidoc b/x-pack/docs/en/rest-api/security/delete-tokens.asciidoc new file mode 100644 index 0000000000000..7d6bae2a4c40f --- /dev/null +++ b/x-pack/docs/en/rest-api/security/delete-tokens.asciidoc @@ -0,0 +1,54 @@ +[role="xpack"] +[[security-api-invalidate-token]] +=== Delete token API + +Invalidates a bearer token for access without requiring basic authentication. + +==== Request + +`DELETE /_xpack/security/oauth2/token` + +==== Description + +The tokens returned by the <> have a +finite period of time for which they are valid and after that time period, they +can no longer be used. That time period is defined by the +`xpack.security.authc.token.timeout` setting. For more information, see +<>. + +If you want to invalidate a token immediately, use this delete token API. + + +==== Request Body + +The following parameters can be specified in the body of a DELETE request and +pertain to deleting a token: + +`token` (required):: +(string) An access token. 
+ +==== Examples + +The following example invalidates the specified token immediately: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/oauth2/token +{ + "token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" +} +-------------------------------------------------- +// NOTCONSOLE + +A successful call returns a JSON structure that indicates whether the token +has already been invalidated. + +[source,js] +-------------------------------------------------- +{ + "created" : true <1> +} +-------------------------------------------------- +// NOTCONSOLE + +<1> When a token has already been invalidated, `created` is set to false. diff --git a/x-pack/docs/en/rest-api/security/delete-users.asciidoc b/x-pack/docs/en/rest-api/security/delete-users.asciidoc new file mode 100644 index 0000000000000..63a66795617bd --- /dev/null +++ b/x-pack/docs/en/rest-api/security/delete-users.asciidoc @@ -0,0 +1,48 @@ +[role="xpack"] +[[security-api-delete-user]] +=== Delete users API + +Deletes users from the native realm. + +==== Request + +`DELETE /_xpack/security/user/` + +==== Description + +For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + +==== Path Parameters + +`username` (required):: + (string) An identifier for the user. + +//==== Request Body + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +The following example deletes the user `jacknich`: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/user/jacknich +-------------------------------------------------- +// CONSOLE +// TEST[setup:jacknich_user] + +If the user is successfully deleted, the request returns `{"found": true}`. +Otherwise, `found` is set to false. + +[source,js] +-------------------------------------------------- +{ + "found" : true +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/disable-users.asciidoc b/x-pack/docs/en/rest-api/security/disable-users.asciidoc new file mode 100644 index 0000000000000..f5a6bc7e9a136 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/disable-users.asciidoc @@ -0,0 +1,43 @@ +[role="xpack"] +[[security-api-disable-user]] +=== Disable users API + +Disables users in the native realm. + + +==== Request + +`PUT /_xpack/security/user//_disable` + + +==== Description + +By default, when you create users, they are enabled. You can use this API to +revoke a user's access to {es}. To re-enable a user, there is an +<>. + +For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + +==== Path Parameters + +`username` (required):: + (string) An identifier for the user. + +//==== Request Body + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. 
+ + +==== Examples + +The following example disables the user `jacknich`: + +[source,js] +-------------------------------------------------- +PUT /_xpack/security/user/jacknich/_disable +-------------------------------------------------- +// CONSOLE +// TEST[setup:jacknich_user] diff --git a/x-pack/docs/en/rest-api/security/enable-users.asciidoc b/x-pack/docs/en/rest-api/security/enable-users.asciidoc new file mode 100644 index 0000000000000..cebaaffa7b28d --- /dev/null +++ b/x-pack/docs/en/rest-api/security/enable-users.asciidoc @@ -0,0 +1,42 @@ +[role="xpack"] +[[security-api-enable-user]] +=== Enable users API + +Enables users in the native realm. + + +==== Request + +`PUT /_xpack/security/user//_enable` + + +==== Description + +By default, when you create users, they are enabled. You can use this enable +users API and the <> to change that attribute. + +For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + +==== Path Parameters + +`username` (required):: + (string) An identifier for the user. + +//==== Request Body + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +The following example enables the user `jacknich`: + +[source,js] +-------------------------------------------------- +PUT /_xpack/security/user/jacknich/_enable +-------------------------------------------------- +// CONSOLE +// TEST[setup:jacknich_user] diff --git a/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc new file mode 100644 index 0000000000000..5412a4bdceb83 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc @@ -0,0 +1,94 @@ +[role="xpack"] +[[security-api-get-privileges]] +=== Get application privileges API + +Retrieves +{stack-ov}/security-privileges.html#application-privileges[application privileges]. + +==== Request + +`GET /_xpack/security/privilege` + + +`GET /_xpack/security/privilege/` + + +`GET /_xpack/security/privilege//` + + +==== Description + +To check a user's application privileges, use the +<>. + + +==== Path Parameters + +`application`:: + (string) The name of the application. Application privileges are always + associated with exactly one application. + If you do not specify this parameter, the API returns information about all + privileges for all applications. + +`privilege`:: + (string) The name of the privilege. If you do not specify this parameter, the + API returns information about all privileges for the requested application. + +//==== Request Body + +==== Authorization + +To use this API, you must have either: + +- the `manage_security` cluster privilege (or a greater privilege such as `all`); _or_ +- the _"Manage Application Privileges"_ global privilege for the application being referenced + in the request + +==== Examples + +The following example retrieves information about the `read` privilege for the +`app01` application: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/privilege/myapp/read +-------------------------------------------------- +// CONSOLE +// TEST[setup:app0102_privileges] + +A successful call returns an object keyed by application name and privilege +name. If the privilege is not defined, the request responds with a 404 status. 
+ +[source,js] +-------------------------------------------------- +{ + "myapp": { + "read": { + "application": "myapp", + "name": "read", + "actions": [ + "data:read/*", + "action:login" + ], + "metadata": { + "description": "Read access to myapp" + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE + +To retrieve all privileges for an application, omit the privilege name: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/privilege/myapp/ +-------------------------------------------------- +// CONSOLE + +To retrieve every privilege, omit both the application and privilege names: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/privilege/ +-------------------------------------------------- +// CONSOLE diff --git a/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc new file mode 100644 index 0000000000000..7abe34b32f560 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc @@ -0,0 +1,74 @@ +[role="xpack"] +[[security-api-get-role-mapping]] +=== Get role mappings API + +Retrieves role mappings. + +==== Request + +`GET /_xpack/security/role_mapping` + + +`GET /_xpack/security/role_mapping/` + +==== Description + +Role mappings define which roles are assigned to each user. For more information, +see {stack-ov}/mapping-roles.html[Mapping users and groups to roles]. + +==== Path Parameters + +`name`:: + (string) The distinct name that identifies the role mapping. The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. You can specify multiple + mapping names as a comma-separated list. If you do not specify this + parameter, the API returns information about all role mappings. + +//==== Request Body + +==== Results + +A successful call retrieves an object, where the keys are the +names of the request mappings, and the values are the JSON representation of +those mappings. For more information, see +<>. + +If there is no mapping with the requested name, the +response will have status code `404`. + + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +The following example retrieves information about the `mapping1` role mapping: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/role_mapping/mapping1 +-------------------------------------------------- +// CONSOLE +// TEST[setup:role_mapping] + + +[source,js] +-------------------------------------------------- +{ + "mapping1": { + "enabled": true, + "roles": [ + "user" + ], + "rules": { + "field": { + "username": "*" + } + }, + "metadata": {} + } +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/get-roles.asciidoc b/x-pack/docs/en/rest-api/security/get-roles.asciidoc new file mode 100644 index 0000000000000..fa6e91b519b6f --- /dev/null +++ b/x-pack/docs/en/rest-api/security/get-roles.asciidoc @@ -0,0 +1,85 @@ +[role="xpack"] +[[security-api-get-role]] +=== Get roles API + +Retrieves roles in the native realm. + +==== Request + +`GET /_xpack/security/role` + + +`GET /_xpack/security/role/` + + +==== Description + +For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + +==== Path Parameters + +`name`:: + (string) The name of the role. 
You can specify multiple roles as a + comma-separated list. If you do not specify this parameter, the API + returns information about all roles. + +//==== Request Body + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster +privilege. + + +==== Examples + +The following example retrieves information about the `my_admin_role` role in +the native realm: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/role/my_admin_role +-------------------------------------------------- +// CONSOLE +// TEST[setup:admin_role] + +A successful call returns an array of roles with the JSON representation of the +role. If the role is not defined in the native realm, the request returns 404. + +[source,js] +-------------------------------------------------- +{ + "my_admin_role": { + "cluster" : [ "all" ], + "indices" : [ + { + "names" : [ "index1", "index2" ], + "privileges" : [ "all" ], + "field_security" : { + "grant" : [ "title", "body" ]} + } + ], + "applications" : [ ], + "run_as" : [ "other_user" ], + "metadata" : { + "version" : 1 + }, + "transient_metadata": { + "enabled": true + } + } +} +-------------------------------------------------- +// TESTRESPONSE + +To retrieve all roles, omit the role name: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/role +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +NOTE: If single role is requested, that role is returned as the response. When +requesting multiple roles, an object is returned holding the found roles, each +keyed by the relevant role name. diff --git a/x-pack/docs/en/rest-api/security/tokens.asciidoc b/x-pack/docs/en/rest-api/security/get-tokens.asciidoc similarity index 63% rename from x-pack/docs/en/rest-api/security/tokens.asciidoc rename to x-pack/docs/en/rest-api/security/get-tokens.asciidoc index f991a5c0cb836..c80b4f60c6bcd 100644 --- a/x-pack/docs/en/rest-api/security/tokens.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-tokens.asciidoc @@ -1,15 +1,12 @@ [role="xpack"] -[[security-api-tokens]] -=== Token Management APIs +[[security-api-get-token]] +=== Get token API -The `token` API enables you to create and invalidate bearer tokens for access -without requiring basic authentication. +Creates a bearer token for access without requiring basic authentication. ==== Request -`POST /_xpack/security/oauth2/token` + - -`DELETE /_xpack/security/oauth2/token` +`POST /_xpack/security/oauth2/token` ==== Description @@ -19,20 +16,20 @@ you can explicitly enable the `xpack.security.authc.token.enabled` setting. When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface. -The Get Token API takes the same parameters as a typical OAuth 2.0 token API +The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body. -A successful Get Token API call returns a JSON structure that contains the access +A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available. -The tokens returned by the Get Token API have a finite period of time for which +The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. 
That time period is defined by the `xpack.security.authc.token.timeout` setting. For more information, see <>. -If you want to invalidate a token immediately, you can do so by using the Delete -Token API. +If you want to invalidate a token immediately, you can do so by using the +<>. ==== Request Body @@ -41,36 +38,39 @@ The following parameters can be specified in the body of a POST request and pertain to creating a token: `grant_type`:: -(string) The type of grant. Currently only the `password` grant type is supported. +(string) The type of grant. Supported grant types are: `password`, +`client_credentials` and `refresh_token`. + +`password`:: +(string) The user's password. If you specify the `password` grant type, this +parameter is required. This parameter is not valid with any other supported +grant type. -`password` (required):: -(string) The user's password. +`refresh_token`:: +(string) If you specify the `refresh_token` grant type, this parameter is +required. It contains the string that was returned when you created the token +and enables you to extend its life. This parameter is not valid with any other +supported grant type. `scope`:: (string) The scope of the token. Currently tokens are only issued for a scope of `FULL` regardless of the value sent with the request. -`username` (required):: -(string) The username that identifies the user. - -The following parameters can be specified in the body of a DELETE request and -pertain to deleting a token: - -`token`:: -(string) An access token. +`username`:: +(string) The username that identifies the user. If you specify the `password` +grant type, this parameter is required. This parameter is not valid with any +other supported grant type. ==== Examples -[[security-api-get-token]] -To obtain a token, submit a POST request to the `/_xpack/security/oauth2/token` -endpoint. + +The following example obtains a token using the `client_credentials` grant type, +which simply creates a token as the authenticated user: [source,js] -------------------------------------------------- POST /_xpack/security/oauth2/token { - "grant_type" : "password", - "username" : "test_admin", - "password" : "x-pack-test-password" + "grant_type" : "client_credentials" } -------------------------------------------------- // CONSOLE @@ -83,12 +83,10 @@ seconds) that the token expires in, and the type: { "access_token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", "type" : "Bearer", - "expires_in" : 1200, - "refresh_token": "vLBPvmAB6KvwvJZr27cS" + "expires_in" : 1200 } -------------------------------------------------- // TESTRESPONSE[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] -// TESTRESPONSE[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] The token returned by this API can be used by sending a request with a `Authorization` header with a value having the prefix `Bearer ` followed @@ -100,23 +98,22 @@ curl -H "Authorization: Bearer dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvb -------------------------------------------------- // NOTCONSOLE -[[security-api-refresh-token]] -To extend the life of an existing token, the token api may be called again with the refresh -token within 24 hours of the token's creation. 
+The following example obtains a token for the `test_admin` user using the +`password` grant type: [source,js] -------------------------------------------------- POST /_xpack/security/oauth2/token { - "grant_type": "refresh_token", - "refresh_token": "vLBPvmAB6KvwvJZr27cS" + "grant_type" : "password", + "username" : "test_admin", + "password" : "x-pack-test-password" } -------------------------------------------------- // CONSOLE -// TEST[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] -// TEST[continued] -The API will return a new token and refresh token. Each refresh token may only be used one time. +The following example output contains the access token, the amount of time (in +seconds) that the token expires in, the type, and the refresh token: [source,js] -------------------------------------------------- @@ -130,30 +127,34 @@ The API will return a new token and refresh token. Each refresh token may only b // TESTRESPONSE[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] // TESTRESPONSE[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] -[[security-api-invalidate-token]] -If a token must be invalidated immediately, you can do so by submitting a DELETE -request to `/_xpack/security/oauth2/token`. For example: +[[security-api-refresh-token]] +To extend the life of an existing token obtained using the `password` grant type, +you can call the API again with the refresh token within 24 hours of the token's +creation. For example: [source,js] -------------------------------------------------- -DELETE /_xpack/security/oauth2/token +POST /_xpack/security/oauth2/token { - "token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" + "grant_type": "refresh_token", + "refresh_token": "vLBPvmAB6KvwvJZr27cS" } -------------------------------------------------- // CONSOLE -// TEST[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] +// TEST[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] // TEST[continued] -A successful call returns a JSON structure that indicates whether the token -has already been invalidated. +The API will return a new token and refresh token. Each refresh token may only +be used one time. [source,js] -------------------------------------------------- { - "created" : true <1> + "access_token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", + "type" : "Bearer", + "expires_in" : 1200, + "refresh_token": "vLBPvmAB6KvwvJZr27cS" } -------------------------------------------------- -// TESTRESPONSE - -<1> When a token has already been invalidated, `created` is set to false. +// TESTRESPONSE[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] +// TESTRESPONSE[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/security/get-users.asciidoc b/x-pack/docs/en/rest-api/security/get-users.asciidoc new file mode 100644 index 0000000000000..2a20baacb0f52 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/get-users.asciidoc @@ -0,0 +1,74 @@ +[role="xpack"] +[[security-api-get-user]] +=== Get users API + +Retrieves information about users in the native realm. 
+ + +==== Request + +`GET /_xpack/security/user` + + +`GET /_xpack/security/user/` + +==== Description + +For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + +==== Path Parameters + +`username`:: + (string) An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves + information about all users. + +//==== Request Body + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +To retrieve a native user, submit a GET request to the `/_xpack/security/user/` +endpoint: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/user/jacknich +-------------------------------------------------- +// CONSOLE +// TEST[setup:jacknich_user] + +A successful call returns an array of users with the JSON representation of the +user. Note that user passwords are not included. + +[source,js] +-------------------------------------------------- +{ + "jacknich": { + "username": "jacknich", + "roles": [ + "admin", "other_role1" + ], + "full_name": "Jack Nicholson", + "email": "jacknich@example.com", + "metadata": { "intelligence" : 7 }, + "enabled": true + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE + +If the user is not defined in the `native` realm, the request 404s. + +Omit the username to retrieve all users: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/user +-------------------------------------------------- +// CONSOLE +// TEST[continued] diff --git a/x-pack/docs/en/rest-api/security/privileges.asciidoc b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc similarity index 69% rename from x-pack/docs/en/rest-api/security/privileges.asciidoc rename to x-pack/docs/en/rest-api/security/has-privileges.asciidoc index adaf27e97073e..cae1bc4d303fe 100644 --- a/x-pack/docs/en/rest-api/security/privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] -[[security-api-privileges]] -=== Privilege APIs +[[security-api-has-privileges]] +=== Has Privileges API [[security-api-has-privilege]] @@ -15,7 +15,7 @@ a specified list of privileges. ==== Description For a list of the privileges that you can specify in this API, -see {xpack-ref}/security-privileges.html[Security Privileges]. +see {stack-ov}/security-privileges.html[Security privileges]. A successful call returns a JSON structure that shows whether each specified privilege is assigned to the user. @@ -30,6 +30,14 @@ privilege is assigned to the user. `privileges`::: (list) A list of the privileges that you want to check for the specified indices. +`application`:: +`application`::: (string) The name of the application. +`privileges`::: (list) A list of the privileges that you want to check for the +specified resources. May be either application privilege names, or the names of +actions that are granted by those privileges +`resources`::: (list) A list of resource names against which the privileges +should be checked + ==== Authorization All users can use this API, but only to determine their own privileges. 
@@ -41,7 +49,7 @@ more information, see ==== Examples The following example checks whether the current user has a specific set of -cluster and indices privileges: +cluster, index, and application privileges: [source,js] -------------------------------------------------- @@ -57,6 +65,13 @@ GET _xpack/security/user/_has_privileges "names": [ "inventory" ], "privileges" : [ "read", "write" ] } + ], + "application": [ + { + "application": "inventory_manager", + "privileges" : [ "read", "data:write/inventory" ], + "resources" : [ "product/1852563" ] + } ] } -------------------------------------------------- @@ -85,7 +100,14 @@ The following example output indicates which privileges the "rdeniro" user has: "write" : false } }, - "application" : {} + "application" : { + "inventory_manager" : { + "product/1852563" : { + "read": false, + "data:write/inventory": false + } + } + } } -------------------------------------------------- // TESTRESPONSE[s/"rdeniro"/"$body.username"/] diff --git a/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc new file mode 100644 index 0000000000000..f715a80014bea --- /dev/null +++ b/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc @@ -0,0 +1,163 @@ +[role="xpack"] +[[security-api-put-privileges]] +=== Create or update application privileges API + +Adds or updates +{stack-ov}/security-privileges.html#application-privileges[application privileges]. + +==== Request + +`POST /_xpack/security/privilege` + + +`PUT /_xpack/security/privilege` + + +==== Description + +This API creates or updates privileges. To remove privileges, use the +<>. + +For more information, see +{stack-ov}/defining-roles.html#roles-application-priv[Application privileges]. + +To check a user's application privileges, use the +<>. + +==== Request Body + +The body is a JSON object where the names of the fields are the application +names and the value of each field is an object. The fields in this inner +object are the names of the privileges and each value is a JSON object that +includes the following fields: + +`actions`:: (array-of-string) A list of action names that are granted by this +privilege. This field must exist and cannot be an empty array. + +`metadata`:: (object) Optional meta-data. Within the `metadata` object, keys +that begin with `_` are reserved for system usage. + + +[[security-api-app-privileges-validation]] +==== Validation + +Application Names:: + Application names are formed from a _prefix_, with an optional _suffix_ that + conform to the following rules: + * The prefix must begin with a lowercase ASCII letter + * The prefix must contain only ASCII letters or digits + * The prefix must be at least 3 characters long + * If the suffix exists, it must begin with either `-` or `_` + * The suffix cannot contain any of the following characters: + `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `*` + * No part of the name can contain whitespace. 
+ +Privilege Names:: + Privilege names must begin with a lowercase ASCII letter and must contain + only ASCII letters and digits along with the characters `_`, `-` and `.` + +Action Names:: + Action names can contain any number of printable ASCII characters and must + contain at least one of the following characters: `/` `*`, `:` + +==== Authorization + +To use this API, you must have either: + +- the `manage_security` cluster privilege (or a greater privilege such as `all`); _or_ +- the _"Manage Application Privileges"_ global privilege for the application being referenced + in the request + +==== Examples + +To add a single privilege, submit a PUT or POST request to the +`/_xpack/security/privilege//` endpoint. For example: + +[source,js] +-------------------------------------------------- +PUT /_xpack/security/privilege +{ + "myapp": { + "read": { + "actions": [ <1> + "data:read/*" , <2> + "action:login" ], + "metadata": { <3> + "description": "Read access to myapp" + } + } + } +} +-------------------------------------------------- +// CONSOLE +<1> These strings have significance within the "myapp" application. {es} does not + assign any meaning to them. +<2> The use of a wildcard here (`*`) means that this privilege grants access to + all actions that start with `data:read/`. {es} does not assign any meaning + to these actions. However, if the request includes an application privilege + such as `data:read/users` or `data:read/settings`, the + <> respects the use of a + wildcard and returns `true`. +<3> The metadata object is optional. + +A successful call returns a JSON structure that shows whether the privilege has +been created or updated. + +[source,js] +-------------------------------------------------- +{ + "myapp": { + "read": { + "created": true <1> + } + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> When an existing privilege is updated, `created` is set to false. + +To add multiple privileges, submit a POST request to the +`/_xpack/security/privilege/` endpoint. For example: + +[source,js] +-------------------------------------------------- +PUT /_xpack/security/privilege +{ + "app01": { + "read": { + "actions": [ "action:login", "data:read/*" ] + }, + "write": { + "actions": [ "action:login", "data:write/*" ] + } + }, + "app02": { + "all": { + "actions": [ "*" ] + } + } +} +-------------------------------------------------- +// CONSOLE + +A successful call returns a JSON structure that shows whether the privileges +have been created or updated. + +[source,js] +-------------------------------------------------- +{ + "app02": { + "all": { + "created": true + } + }, + "app01": { + "read": { + "created": true + }, + "write": { + "created": true + } + } +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc new file mode 100644 index 0000000000000..be4afc57a1a54 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc @@ -0,0 +1,89 @@ +[role="xpack"] +[[role-mapping-resources]] +=== Role mapping resources + +A role mapping resource has the following properties: + +`enabled`:: +(boolean) Mappings that have `enabled` set to `false` are ignored when role +mapping is performed. + +`metadata`:: +(object) Additional metadata that helps define which roles are assigned to each +user. Within the `metadata` object, keys beginning with `_` are reserved for +system usage. 
+
+`roles`::
+(list) A list of roles that are granted to the users that match the role mapping
+rules.
+
+`rules`::
+(object) The rules that determine which users should be matched by the mapping.
+A rule is a logical condition that is expressed by using a JSON DSL. The DSL supports the following rule types:
+`any`:::
+(array of rules) If *any* of its children are true, it evaluates to `true`.
+`all`:::
+(array of rules) If *all* of its children are true, it evaluates to `true`.
+`field`:::
+(object) See <>.
+`except`:::
+(object) A single rule as an object. Only valid as a child of an `all` rule. If
+its child is `false`, the `except` is `true`.
+
+
+[float]
+[[mapping-roles-rule-field]]
+==== Field rules
+
+The `field` rule is the primary building block for a role mapping expression.
+It takes a single object as its value and that object must contain a single
+member with key _F_ and value _V_. The field rule looks up the value of _F_
+within the user object and then tests whether the user value _matches_ the
+provided value _V_.
+
+The value specified in the field rule can be one of the following types:
+[cols="2,5,3m"]
+|=======================
+| Type | Description | Example
+
+| Simple String | Exactly matches the provided value. | "esadmin"
+| Wildcard String | Matches the provided value using a wildcard. | "*,dc=example,dc=com"
+| Regular Expression | Matches the provided value using a
+ {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp]. | "/.\*-admin[0-9]*/"
+| Number | Matches an equivalent numerical value. | 7
+| Null | Matches a null or missing value. | null
+| Array | Tests each element in the array in
+ accordance with the above definitions.
+ If _any_ of the elements match, the match is successful. | ["admin", "operator"]
+|=======================
+
+[float]
+===== User fields
+
+The _user object_ against which rules are evaluated has the following fields:
+
+`username`::
+(string) The username by which {security} knows this user. For example, `"username": "jsmith"`.
+`dn`::
+(string) The _Distinguished Name_ of the user. For example, `"dn": "cn=jsmith,ou=users,dc=example,dc=com"`.
+`groups`::
+(array of strings) The groups to which the user belongs. For example, `"groups" : [ "cn=admin,ou=groups,dc=example,dc=com", "cn=esusers,ou=groups,dc=example,dc=com" ]`.
+`metadata`::
+(object) Additional metadata for the user. For example, `"metadata": { "cn": "John Smith" }`.
+`realm`::
+(object) The realm that authenticated the user. The only field in this object is the realm name. For example, `"realm": { "name": "ldap1" }`.
+
+The `groups` field is multi-valued; a user can belong to many groups. When a
+`field` rule is applied against a multi-valued field, it is considered to match
+if _at least one_ of the member values matches. For example, the following rule
+matches any user who is a member of the `admin` group, regardless of any
+other groups they belong to:
+
+[source, js]
+------------------------------------------------------------
+{ "field" : { "groups" : "admin" } }
+------------------------------------------------------------
+// NOTCONSOLE
+
+For additional realm-specific details, see
+{stack-ov}/mapping-roles.html#ldap-role-mapping[Mapping Users and Groups to Roles].
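The rule types described above compose: `all` and `any` take arrays of nested rules, and `except` negates a single child inside an `all`. The following `rules` object is an illustrative sketch only (the realm name, group DN, and usernames are placeholders); it matches users from the `ldap1` realm who are either members of the admins group or one of two named accounts, while excluding the `guest` account:

[source, js]
------------------------------------------------------------
{
  "all": [
    { "field": { "realm.name": "ldap1" } },
    {
      "any": [
        { "field": { "groups": "cn=admins,dc=example,dc=com" } },
        { "field": { "username": [ "es-admin", "es-system" ] } }
      ]
    },
    { "except": { "field": { "username": "guest" } } }
  ]
}
------------------------------------------------------------
// NOTCONSOLE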
diff --git a/x-pack/docs/en/rest-api/security/role-mapping.asciidoc b/x-pack/docs/en/rest-api/security/role-mapping.asciidoc deleted file mode 100644 index 3844e30c62dc0..0000000000000 --- a/x-pack/docs/en/rest-api/security/role-mapping.asciidoc +++ /dev/null @@ -1,404 +0,0 @@ -[role="xpack"] -[[security-api-role-mapping]] -=== Role Mapping APIs - -The Role Mapping API enables you to add, remove, and retrieve role mappings. - -==== Request - -`GET /_xpack/security/role_mapping` + - -`GET /_xpack/security/role_mapping/` + - -`DELETE /_xpack/security/role_mapping/` + - -`POST /_xpack/security/role_mapping/` + - -`PUT /_xpack/security/role_mapping/` - -==== Description - -Role mappings have _rules_ that identify users and a list of _roles_ that are -granted to those users. - -NOTE: This API does not create roles. Rather, it maps users to existing roles. -Roles can be created by using <> or -{xpack-ref}/defining-roles.html#roles-management-file[roles files]. - -The role mapping rule is a logical condition that is expressed using a JSON DSL. -The DSL supports the following rule types: - -|======================= -| Type | Value Type (child) | Description - -| `any` | An array of rules | If *any* of its children are true, it - evaluates to `true`. -| `all` | An array of rules | If *all* of its children are true, it - evaluates to `true`. -| `field` | An object | See <> -| `except` | A single rule as an object | Only valid as a child of an `all` - rule. If its child is `false`, the - `except` is `true`. -|======================= - -[float] -[[mapping-roles-rule-field]] -===== The Field Rule - -The `field` rule is the primary building block for a role-mapping expression. -It takes a single object as its value and that object must contain a single -member with key _F_ and value _V_. The field rule looks up the value of _F_ -within the user object and then tests whether the user value _matches_ the -provided value _V_. - -The value specified in the field rule can be one of the following types: -[cols="2,5,3m"] -|======================= -| Type | Description | Example - -| Simple String | Exactly matches the provided value. | "esadmin" -| Wildcard String | Matches the provided value using a wildcard. | "*,dc=example,dc=com" -| Regular Expression | Matches the provided value using a - {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp]. | "/.\*-admin[0-9]*/" -| Number | Matches an equivalent numerical value. | 7 -| Null | Matches a null or missing value. | null -| Array | Tests each element in the array in - accordance with the above definitions. - If _any_ of elements match, the match is successful. | ["admin", "operator"] -|======================= - -===== User Fields - -The _user object_ against which rules are evaluated has the following fields: -[cols="1s,,,m"] -|======================= -| Name | Type | Description | Example - -| username | string | The username by which {security} knows this user. | `"username": "jsmith"` -| dn | string | The _Distinguished Name_ of the user. | `"dn": "cn=jsmith,ou=users,dc=example,dc=com",` -| groups | array-of-string | The groups to which the user belongs. | `"groups" : [ "cn=admin,ou=groups,dc=example,dc=com", -"cn=esusers,ou=groups,dc=example,dc=com ]` -| metadata | object | Additional metadata for the user. | `"metadata": { "cn": "John Smith" }` -| realm | object | The realm that authenticated the user. The only field in this object is the realm name. 
| `"realm": { "name": "ldap1" }` -|======================= - -The `groups` field is multi-valued; a user can belong to many groups. When a -`field` rule is applied against a multi-valued field, it is considered to match -if _at least one_ of the member values matches. For example, the following rule -matches any user who is a member of the `admin` group, regardless of any -other groups they belong to: - -[source, js] ------------------------------------------------------------- -{ "field" : { "groups" : "admin" } } ------------------------------------------------------------- -// NOTCONSOLE - -For additional realm-specific details, see -{xpack-ref}/mapping-roles.html#ldap-role-mapping[Mapping Users and Groups to Roles]. - - -==== Path Parameters - -`name`:: - (string) The distinct name that identifies the role mapping. The name is - used solely as an identifier to facilitate interaction via the API; it does - not affect the behavior of the mapping in any way. If you do not specify this - parameter for the Get Role Mappings API, it returns information about all - role mappings. - - -==== Request Body - -The following parameters can be specified in the body of a PUT or POST request -and pertain to adding a role mapping: - -`enabled` (required):: -(boolean) Mappings that have `enabled` set to `false` are ignored when role -mapping is performed. - -`metadata`:: -(object) Additional metadata that helps define which roles are assigned to each -user. Within the `metadata` object, keys beginning with `_` are reserved for -system usage. - -`roles` (required):: -(list) A list of roles that are granted to the users that match the role-mapping -rules. - -`rules` (required):: -(object) The rules that determine which users should be matched by the mapping. -A rule is a logical condition that is expressed by using a JSON DSL. - - -==== Authorization - -To use this API, you must have at least the `manage_security` cluster privilege. - - -==== Examples - -[[security-api-put-role-mapping]] -To add a role mapping, submit a PUT or POST request to the `/_xpack/security/role_mapping/` endpoint. The following example assigns -the "user" role to all users: - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping1 -{ - "roles": [ "user"], - "enabled": true, <1> - "rules": { - "field" : { "username" : "*" } - }, - "metadata" : { <2> - "version" : 1 - } -} ------------------------------------------------------------- -// CONSOLE -<1> Mappings that have `enabled` set to `false` are ignored when role mapping - is performed. -<2> Metadata is optional. - -A successful call returns a JSON structure that shows whether the mapping has -been created or updated. - -[source,js] --------------------------------------------------- -{ - "role_mapping" : { - "created" : true <1> - } -} --------------------------------------------------- -// TESTRESPONSE -<1> When an existing mapping is updated, `created` is set to false. 
- -The following example assigns the "user" and "admin" roles to specific users: - -[source,js] --------------------------------------------------- -POST /_xpack/security/role_mapping/mapping2 -{ - "roles": [ "user", "admin" ], - "enabled": true, - "rules": { - "field" : { "username" : [ "esadmin01", "esadmin02" ] } - } -} --------------------------------------------------- -// CONSOLE - -The following example matches any user where either the username is `esadmin` -or the user is in the `cn=admin,dc=example,dc=com` group: - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping3 -{ - "roles": [ "superuser" ], - "enabled": true, - "rules": { - "any": [ - { - "field": { - "username": "esadmin" - } - }, - { - "field": { - "groups": "cn=admins,dc=example,dc=com" - } - } - ] - } -} ------------------------------------------------------------- -// CONSOLE - -The following example matches users who authenticated against a specific realm: -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping4 -{ - "roles": [ "ldap-user" ], - "enabled": true, - "rules": { - "field" : { "realm.name" : "ldap1" } - } -} ------------------------------------------------------------- -// CONSOLE - -The following example matches users within a specific LDAP sub-tree: - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping5 -{ - "roles": [ "example-user" ], - "enabled": true, - "rules": { - "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } - } -} ------------------------------------------------------------- -// CONSOLE - -The following example matches users within a particular LDAP sub-tree in a -specific realm: - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping6 -{ - "roles": [ "ldap-example-user" ], - "enabled": true, - "rules": { - "all": [ - { "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } }, - { "field" : { "realm.name" : "ldap1" } } - ] - } -} ------------------------------------------------------------- -// CONSOLE - -The rules can be more complex and include wildcard matching. 
For example, the -following mapping matches any user where *all* of these conditions are met: - -- the _Distinguished Name_ matches the pattern `*,ou=admin,dc=example,dc=com`, - or the username is `es-admin`, or the username is `es-system` -- the user in in the `cn=people,dc=example,dc=com` group -- the user does not have a `terminated_date` - - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping7 -{ - "roles": [ "superuser" ], - "enabled": true, - "rules": { - "all": [ - { - "any": [ - { - "field": { - "dn": "*,ou=admin,dc=example,dc=com" - } - }, - { - "field": { - "username": [ "es-admin", "es-system" ] - } - } - ] - }, - { - "field": { - "groups": "cn=people,dc=example,dc=com" - } - }, - { - "except": { - "field": { - "metadata.terminated_date": null - } - } - } - ] - } -} ------------------------------------------------------------- -// CONSOLE - -[[security-api-get-role-mapping]] -To retrieve a role mapping, issue a GET request to the -`/_xpack/security/role_mapping/` endpoint: - -[source,js] --------------------------------------------------- -GET /_xpack/security/role_mapping/mapping7 --------------------------------------------------- -// CONSOLE -// TEST[continued] - -A successful call retrieves an object, where the keys are the -names of the request mappings, and the values are -the JSON representation of those mappings. -If there is no mapping with the requested name, the -response will have status code `404`. - -[source,js] --------------------------------------------------- -{ - "mapping7": { - "enabled": true, - "roles": [ - "superuser" - ], - "rules": { - "all": [ - { - "any": [ - { - "field": { - "dn": "*,ou=admin,dc=example,dc=com" - } - }, - { - "field": { - "username": [ - "es-admin", - "es-system" - ] - } - } - ] - }, - { - "field": { - "groups": "cn=people,dc=example,dc=com" - } - }, - { - "except": { - "field": { - "metadata.terminated_date": null - } - } - } - ] - }, - "metadata": {} - } -} --------------------------------------------------- -// TESTRESPONSE - -You can specify multiple mapping names as a comma-separated list. -To retrieve all mappings, omit the name entirely. - -[[security-api-delete-role-mapping]] -To delete a role mapping, submit a DELETE request to the -`/_xpack/security/role_mapping/` endpoint: - -[source,js] --------------------------------------------------- -DELETE /_xpack/security/role_mapping/mapping1 --------------------------------------------------- -// CONSOLE -// TEST[setup:role_mapping] - -If the mapping is successfully deleted, the request returns `{"found": true}`. -Otherwise, `found` is set to false. - -[source,js] --------------------------------------------------- -{ - "found" : true -} --------------------------------------------------- -// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/roles.asciidoc b/x-pack/docs/en/rest-api/security/roles.asciidoc deleted file mode 100644 index 28c09c560ec3c..0000000000000 --- a/x-pack/docs/en/rest-api/security/roles.asciidoc +++ /dev/null @@ -1,205 +0,0 @@ -[role="xpack"] -[[security-api-roles]] -=== Role Management APIs - -The Roles API enables you to add, remove, and retrieve roles in the `native` -realm. 
- -==== Request - -`GET /_xpack/security/role` + - -`GET /_xpack/security/role/` + - -`DELETE /_xpack/security/role/` + - -`POST /_xpack/security/role//_clear_cache` + - -`POST /_xpack/security/role/` + - -`PUT /_xpack/security/role/` - - -==== Description - -The Roles API is generally the preferred way to manage roles, rather than using -file-based role management. For more information, see -{xpack-ref}/authorization.html[Configuring Role-based Access Control]. - - -==== Path Parameters - -`name`:: - (string) The name of the role. If you do not specify this parameter, the - Get Roles API returns information about all roles. - - -==== Request Body - -The following parameters can be specified in the body of a PUT or POST request -and pertain to adding a role: - -`cluster`:: (list) A list of cluster privileges. These privileges define the -cluster level actions that users with this role are able to execute. - -`indices`:: (list) A list of indices permissions entries. -`field_security`::: (list) The document fields that the owners of the role have -read access to. For more information, see -{xpack-ref}/field-and-document-access-control.html[Setting Up Field and Document Level Security]. -`names` (required)::: (list) A list of indices (or index name patterns) to which the -permissions in this entry apply. -`privileges`(required)::: (list) The index level privileges that the owners of the role -have on the specified indices. -`query`::: A search query that defines the documents the owners of the role have -read access to. A document within the specified indices must match this query in -order for it to be accessible by the owners of the role. - -`metadata`:: (object) Optional meta-data. Within the `metadata` object, keys -that begin with `_` are reserved for system usage. - -`run_as`:: (list) A list of users that the owners of this role can impersonate. -For more information, see -{xpack-ref}/run-as-privilege.html[Submitting Requests on Behalf of Other Users]. - -For more information, see {xpack-ref}/defining-roles.html[Defining Roles]. - - -==== Authorization - -To use this API, you must have at least the `manage_security` cluster -privilege. - - -==== Examples - -[[security-api-put-role]] -To add a role, submit a PUT or POST request to the `/_xpack/security/role/` -endpoint: - -[source,js] --------------------------------------------------- -POST /_xpack/security/role/my_admin_role -{ - "cluster": ["all"], - "indices": [ - { - "names": [ "index1", "index2" ], - "privileges": ["all"], - "field_security" : { // optional - "grant" : [ "title", "body" ] - }, - "query": "{\"match\": {\"title\": \"foo\"}}" // optional - } - ], - "run_as": [ "other_user" ], // optional - "metadata" : { // optional - "version" : 1 - } -} --------------------------------------------------- -// CONSOLE - -A successful call returns a JSON structure that shows whether the role has been -created or updated. - -[source,js] --------------------------------------------------- -{ - "role": { - "created": true <1> - } -} --------------------------------------------------- -// TESTRESPONSE -<1> When an existing role is updated, `created` is set to false. 
- -[[security-api-get-role]] -To retrieve a role from the `native` Security realm, issue a GET request to the -`/_xpack/security/role/` endpoint: - -[source,js] --------------------------------------------------- -GET /_xpack/security/role/my_admin_role --------------------------------------------------- -// CONSOLE -// TEST[continued] - -A successful call returns an array of roles with the JSON representation of the -role. If the role is not defined in the `native` realm, the request 404s. - -[source,js] --------------------------------------------------- -{ - "my_admin_role": { - "cluster" : [ "all" ], - "indices" : [ { - "names" : [ "index1", "index2" ], - "privileges" : [ "all" ], - "field_security" : { - "grant" : [ "title", "body" ] - }, - "query" : "{\"match\": {\"title\": \"foo\"}}" - } ], - "applications" : [ ], - "run_as" : [ "other_user" ], - "metadata" : { - "version" : 1 - }, - "transient_metadata": { - "enabled": true - } - } -} --------------------------------------------------- -// TESTRESPONSE - -You can specify multiple roles as a comma-separated list. To retrieve all roles, -omit the role name. - -[source,js] --------------------------------------------------- -# Retrieve roles "r1", "r2", and "my_admin_role" -GET /_xpack/security/role/r1,r2,my_admin_role - -# Retrieve all roles -GET /_xpack/security/role --------------------------------------------------- -// CONSOLE -// TEST[continued] - -NOTE: If single role is requested, that role is returned as the response. When -requesting multiple roles, an object is returned holding the found roles, each -keyed by the relevant role name. - -[[security-api-delete-role]] -To delete a role, submit a DELETE request to the `/_xpack/security/role/` -endpoint: - -[source,js] --------------------------------------------------- -DELETE /_xpack/security/role/my_admin_role --------------------------------------------------- -// CONSOLE -// TEST[continued] - -If the role is successfully deleted, the request returns `{"found": true}`. -Otherwise, `found` is set to false. - -[source,js] --------------------------------------------------- -{ - "found" : true -} --------------------------------------------------- -// TESTRESPONSE - -[[security-api-clear-role-cache]] -The Clear Roles Cache API evicts roles from the native role cache. To clear the -cache for a role, submit a POST request `/_xpack/security/role//_clear_cache` -endpoint: - -[source,js] --------------------------------------------------- -POST /_xpack/security/role/my_admin_role/_clear_cache --------------------------------------------------- -// CONSOLE diff --git a/x-pack/docs/en/rest-api/security/users.asciidoc b/x-pack/docs/en/rest-api/security/users.asciidoc deleted file mode 100644 index c84da5c7d75ff..0000000000000 --- a/x-pack/docs/en/rest-api/security/users.asciidoc +++ /dev/null @@ -1,226 +0,0 @@ -[role="xpack"] -[[security-api-users]] -=== User Management APIs - -The `user` API enables you to create, read, update, and delete users from the -`native` realm. These users are commonly referred to as *native users*. - - -==== Request - -`GET /_xpack/security/user` + - -`GET /_xpack/security/user/` + - -`DELETE /_xpack/security/user/` + - -`POST /_xpack/security/user/` + - -`PUT /_xpack/security/user/` + - -`PUT /_xpack/security/user//_disable` + - -`PUT /_xpack/security/user//_enable` + - -`PUT /_xpack/security/user//_password` - - -==== Description - -You can use the PUT user API to create or update users. 
When updating a user, -you can update everything but its `username` and `password`. To change a user's -password, use the <>. - -[[username-validation]] -NOTE: Usernames must be at least 1 and no more than 1024 characters. They can -contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, punctuation, and -printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. -Leading or trailing whitespace is not allowed. - -==== Path Parameters - -`username`:: - (string) An identifier for the user. If you omit this parameter from a Get - User API request, it retrieves information about all users. - - -==== Request Body - -The following parameters can be specified in the body of a POST or PUT request -and pertain to creating a user: - -`enabled`:: -(boolean) Specifies whether the user is enabled. The default value is `true`. - -`email`:: -(string) The email of the user. - -`full_name`:: -(string) The full name of the user. - -`metadata`:: -(object) Arbitrary metadata that you want to associate with the user. - -`password` (required):: -(string) The user's password. Passwords must be at least 6 characters long. - -`roles` (required):: -(list) A set of roles the user has. The roles determine the user's access -permissions. To create a user without any roles, specify an empty list: `[]`. - -==== Authorization - -To use this API, you must have at least the `manage_security` cluster privilege. - - -==== Examples - -[[security-api-put-user]] -To add a user, submit a PUT or POST request to the `/_xpack/security/user/` -endpoint. - -[source,js] --------------------------------------------------- -POST /_xpack/security/user/jacknich -{ - "password" : "j@rV1s", - "roles" : [ "admin", "other_role1" ], - "full_name" : "Jack Nicholson", - "email" : "jacknich@example.com", - "metadata" : { - "intelligence" : 7 - } -} --------------------------------------------------- -// CONSOLE - -A successful call returns a JSON structure that shows whether the user has been -created or updated. - -[source,js] --------------------------------------------------- -{ - "user": { - "created" : true <1> - } -} --------------------------------------------------- -// TESTRESPONSE -<1> When an existing user is updated, `created` is set to false. - -After you add a user through the Users API, requests from that user can be -authenticated. For example: - -[source,shell] --------------------------------------------------- -curl -u jacknich:j@rV1s http://localhost:9200/_cluster/health --------------------------------------------------- -// NOTCONSOLE - -[[security-api-get-user]] -To retrieve a native user, submit a GET request to the `/_xpack/security/user/` -endpoint: - -[source,js] --------------------------------------------------- -GET /_xpack/security/user/jacknich --------------------------------------------------- -// CONSOLE -// TEST[continued] - -A successful call returns an array of users with the JSON representation of the -user. Note that user passwords are not included. - -[source,js] --------------------------------------------------- -{ - "jacknich": { <1> - "username" : "jacknich", - "roles" : [ "admin", "other_role1" ], - "full_name" : "Jack Nicholson", - "email" : "jacknich@example.com", - "enabled": true, - "metadata" : { - "intelligence" : 7 - } - } -} --------------------------------------------------- -// TESTRESPONSE -<1> If the user is not defined in the `native` realm, the request 404s. 
- -You can specify multiple usernames as a comma-separated list: - -[source,js] --------------------------------------------------- -GET /_xpack/security/user/jacknich,rdinero --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Omit the username to retrieve all users: - -[source,js] --------------------------------------------------- -GET /_xpack/security/user --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[[security-api-reset-user-password]] -To reset the password for a user, submit a PUT request to the -`/_xpack/security/user//_password` endpoint: - -[source,js] --------------------------------------------------- -PUT /_xpack/security/user/jacknich/_password -{ - "password" : "s3cr3t" -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[[security-api-disable-user]] -To disable a user, submit a PUT request to the -`/_xpack/security/user//_disable` endpoint: - -[source,js] --------------------------------------------------- -PUT /_xpack/security/user/jacknich/_disable --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[[security-api-enable-user]] -To enable a user, submit a PUT request to the -`/_xpack/security/user//_enable` endpoint: - -[source,js] --------------------------------------------------- -PUT /_xpack/security/user/jacknich/_enable --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[[security-api-delete-user]] -To delete a user, submit a DELETE request to the `/_xpack/security/user/` -endpoint: - -[source,js] --------------------------------------------------- -DELETE /_xpack/security/user/jacknich --------------------------------------------------- -// CONSOLE -// TEST[continued] - -If the user is successfully deleted, the request returns `{"found": true}`. -Otherwise, `found` is set to false. 
- -[source,js] --------------------------------------------------- -{ - "found" : true -} --------------------------------------------------- -// TESTRESPONSE diff --git a/x-pack/docs/en/rollup/api-quickref.asciidoc b/x-pack/docs/en/rollup/api-quickref.asciidoc deleted file mode 100644 index 937c6a84e5e14..0000000000000 --- a/x-pack/docs/en/rollup/api-quickref.asciidoc +++ /dev/null @@ -1,35 +0,0 @@ -[[rollup-api-quickref]] -== API Quick Reference - -experimental[] - -Most {rollup} endpoints have the following base: - -[source,js] ----- -/_xpack/rollup/ ----- -// NOTCONSOLE - -[float] -[[rollup-api-jobs]] -=== /job/ - -* {ref}/rollup-put-job.html[PUT /job/+++]: Create a job -* {ref}/rollup-get-job.html[GET /job]: List jobs -* {ref}/rollup-get-job.html[GET /job/+++]: Get job details -* {ref}/rollup-start-job.html[POST /job//_start]: Start a job -* {ref}/rollup-stop-job.html[POST /job//_stop]: Stop a job -* {ref}/rollup-delete-job.html[DELETE /job/+++]: Delete a job - -[float] -[[rollup-api-data]] -=== /data/ - -* {ref}/rollup-get-rollup-caps.html[GET /data//_rollup_caps+++]: Get Rollup Capabilities - -[float] -[[rollup-api-index]] -=== // - -* {ref}/rollup-search.html[GET //_rollup_search]: Search rollup data diff --git a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc index 6298bb8ef9f54..ba554eb8595dd 100644 --- a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc @@ -173,7 +173,7 @@ represent user roles for different systems in the organization. The `active_directory` realm enables you to map Active Directory users to roles via their Active Directory groups or other metadata. This role mapping can be -configured via the <> or by using +configured via the <> or by using a file stored on each node. When a user authenticates against an Active Directory realm, the privileges for that user are the union of all privileges defined by the roles to which the user is mapped. diff --git a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc index 683da76bb7b99..fbf823dae7060 100644 --- a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc @@ -55,18 +55,23 @@ cluster. + -- The `users` file stores all the users and their passwords. Each line in the file -represents a single user entry consisting of the username and **hashed** password. +represents a single user entry consisting of the username and **hashed** and **salted** password. [source,bash] ---------------------------------------------------------------------- rdeniro:$2a$10$BBJ/ILiyJ1eBTYoRKxkqbuDEdYECplvxnqQ47uiowE7yGqvCEgj9W alpacino:$2a$10$cNwHnElYiMYZ/T3K4PvzGeJ1KbpXZp2PfoQD.gfaVdImnHOwIuBKS -jacknich:$2a$10$GYUNWyABV/Ols/.bcwxuBuuaQzV6WIauW6RdboojxcixBq3LtI3ni +jacknich:{PBKDF2}50000$z1CLJt0MEFjkIK5iEfgvfnA6xq7lF25uasspsTKSo5Q=$XxCVLbaKDimOdyWgLCLJiyoiWpA/XDMe/xtVgn1r5Sg= ---------------------------------------------------------------------- -{security} uses `bcrypt` to hash the user passwords. +NOTE: To limit exposure to credential theft and mitigate credential compromise, +the file realm stores passwords and caches user credentials according to +security best practices. 
By default, a hashed version of user credentials +is stored in memory, using a salted `sha-256` hash algorithm and a hashed +version of passwords is stored on disk salted and hashed with the `bcrypt` +hash algorithm. To use different hash algorithms, see <>. -While it is possible to modify this files directly using any standard text +While it is possible to modify the `users` files directly using any standard text editor, we strongly recommend using the <> tool to apply the required changes. @@ -103,4 +108,4 @@ By default, {security} checks these files for changes every 5 seconds. You can change this default behavior by changing the `resource.reload.interval.high` setting in the `elasticsearch.yml` file (as this is a common setting in {es}, changing its value may effect other schedules in the system). --- \ No newline at end of file +-- diff --git a/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc new file mode 100644 index 0000000000000..9e7ed4762728a --- /dev/null +++ b/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc @@ -0,0 +1,175 @@ +[role="xpack"] +[[configuring-kerberos-realm]] +=== Configuring a Kerberos realm + +Kerberos is used to protect services and uses a ticket-based authentication +protocol to authenticate users. +You can configure {es} to use the Kerberos V5 authentication protocol, which is +an industry standard protocol, to authenticate users. +In this scenario, clients must present Kerberos tickets for authentication. + +In Kerberos, users authenticate with an authentication service and later +with a ticket granting service to generate a TGT (ticket-granting ticket). +This ticket is then presented to the service for authentication. +Refer to your Kerberos installation documentation for more information about +obtaining TGT. {es} clients must first obtain a TGT then initiate the process of +authenticating with {es}. + +For a summary of Kerberos terminology, see {stack-ov}/kerberos-realm.html[Kerberos authentication]. + +==== Before you begin + +. Deploy Kerberos. ++ +-- +You must have the Kerberos infrastructure set up in your environment. + +NOTE: Kerberos requires a lot of external services to function properly, such as +time synchronization between all machines and working forward and reverse DNS +mappings in your domain. Refer to your Kerberos documentation for more details. + +These instructions do not cover setting up and configuring your Kerberos +deployment. Where examples are provided, they pertain to an MIT Kerberos V5 +deployment. For more information, see +http://web.mit.edu/kerberos/www/index.html[MIT Kerberos documentation] +-- + +. Configure Java GSS. ++ +-- + +{es} uses Java GSS framework support for Kerberos authentication. +To support Kerberos authentication, {es} needs the following files: + +* `krb5.conf`, a Kerberos configuration file +* A `keytab` file that contains credentials for the {es} service principal + +The configuration requirements depend on your Kerberos setup. Refer to your +Kerberos documentation to configure the `krb5.conf` file. + +For more information on Java GSS, see +https://docs.oracle.com/javase/10/security/kerberos-requirements1.htm[Java GSS Kerberos requirements] +-- + +==== Create a Kerberos realm + +To configure a Kerberos realm in {es}: + +. Configure the JVM to find the Kerberos configuration file. 
++ +-- +{es} uses Java GSS and JAAS Krb5LoginModule to support Kerberos authentication +using a Simple and Protected GSSAPI Negotiation Mechanism (SPNEGO) mechanism. +The Kerberos configuration file (`krb5.conf`) provides information such as the +default realm, the Key Distribution Center (KDC), and other configuration details +required for Kerberos authentication. When the JVM needs some configuration +properties, it tries to find those values by locating and loading this file. The +JVM system property to configure the file path is `java.security.krb5.conf`. To +configure JVM system properties see {ref}/jvm-options.html[configuring jvm options]. +If this system property is not specified, Java tries to locate the file based on +the conventions. + +TIP: It is recommended that this system property be configured for {es}. +The method for setting this property depends on your Kerberos infrastructure. +Refer to your Kerberos documentation for more details. + +For more information, see http://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html[krb5.conf] + +-- + +. Create a keytab for the {es} node. ++ +-- +A keytab is a file that stores pairs of principals and encryption keys. {es} +uses the keys from the keytab to decrypt the tickets presented by the user. You +must create a keytab for {es} by using the tools provided by your Kerberos +implementation. For example, some tools that create keytabs are `ktpass.exe` on +Windows and `kadmin` for MIT Kerberos. +-- + +. Put the keytab file in the {es} configuration directory. ++ +-- +Make sure that this keytab file has read permissions. This file contains +credentials, therefore you must take appropriate measures to protect it. + +IMPORTANT: {es} uses Kerberos on the HTTP network layer, therefore there must be +a keytab file for the HTTP service principal on every {es} node. The service +principal name must have the format `HTTP/es.domain.local@ES.DOMAIN.LOCAL`. +The keytab files are unique for each node since they include the hostname. +An {es} node can act as any principal a client requests as long as that +principal and its credentials are found in the configured keytab. + +-- + +. Create a Kerberos realm. ++ +-- + +To enable Kerberos authentication in {es}, you must add a Kerberos realm in the +realm chain. + +NOTE: You can configure only one Kerberos realm on {es} nodes. + +To configure a Kerberos realm, there are a few mandatory realm settings and +other optional settings that you need to configure in the `elasticsearch.yml` +configuration file. Add a realm of type `kerberos` under the +`xpack.security.authc.realms` namespace. + +The most common configuration for a Kerberos realm is as follows: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.realms.kerb1: + type: kerberos + order: 3 + keytab.path: es.keytab + remove_realm_name: false +------------------------------------------------------------ + +The `username` is extracted from the ticket presented by user and usually has +the format `username@REALM`. This `username` is used for mapping +roles to the user. If realm setting `remove_realm_name` is +set to `true`, the realm part (`@REALM`) is removed. The resulting `username` +is used for role mapping. + +For detailed information of available realm settings, +see {ref}/security-settings.html#ref-kerberos-settings[Kerberos realm settings]. + +-- + +. Restart {es} + +. Map Kerberos users to roles. ++ +-- + +The `kerberos` realm enables you to map Kerberos users to roles. 
You can +configure these role mappings by using the +{ref}/security-api-role-mapping.html[role-mapping API]. You identify +users by their `username` field. + +The following example uses the role mapping API to map `user@REALM` to the roles +`monitoring` and `user`: + +[source,js] +-------------------------------------------------- +POST _xpack/security/role_mapping/kerbrolemapping +{ + "roles" : [ "monitoring_user" ], + "enabled": true, + "rules" : { + "field" : { "username" : "user@REALM" } + } +} +-------------------------------------------------- +// CONSOLE + +For more information, see {stack-ov}/mapping-roles.html[Mapping users and groups to roles]. + +NOTE: The Kerberos realm supports +{stack-ov}/realm-chains.html#authorization_realms[authorization realms] as an +alternative to role mapping. + +-- + diff --git a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc index e32c9eb5300b3..a5f8c3e441205 100644 --- a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc @@ -133,7 +133,7 @@ supports both failover and load balancing modes of operation. See -- The `ldap` realm enables you to map LDAP users to to roles via their LDAP groups, or other metadata. This role mapping can be configured via the -{ref}/security-api-role-mapping.html[role-mapping API] or by using a file stored +{ref}/security-api-put-role-mapping.html[add role mapping API] or by using a file stored on each node. When a user authenticates with LDAP, the privileges for that user are the union of all privileges defined by the roles to which the user is mapped. @@ -189,6 +189,11 @@ For more information, see {xpack-ref}/ldap-realm.html#mapping-roles-ldap[Mapping LDAP Groups to Roles] and {xpack-ref}/mapping-roles.html[Mapping Users and Groups to Roles]. + +NOTE: The LDAP realm supports +{stack-ov}/realm-chains.html#authorization_realms[authorization realms] as an +alternative to role mapping. + -- . (Optional) Configure the `metadata` setting on the LDAP realm to include extra @@ -211,4 +216,4 @@ xpack: type: ldap metadata: cn -------------------------------------------------- --- \ No newline at end of file +-- diff --git a/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc index 3cda29c2c711f..e9fb9cd0eb8a0 100644 --- a/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc @@ -34,6 +34,13 @@ xpack: type: native order: 0 ------------------------------------------------------------ + +NOTE: To limit exposure to credential theft and mitigate credential compromise, +the native realm stores passwords and caches user credentials according to +security best practices. By default, a hashed version of user credentials +is stored in memory, using a salted `sha-256` hash algorithm and a hashed +version of passwords is stored on disk salted and hashed with the `bcrypt` +hash algorithm. To use different hash algorithms, see <>. -- . Restart {es}. 
diff --git a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc index f66a82b06641e..9a4d5fcf18bf6 100644 --- a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc @@ -10,7 +10,8 @@ NOTE: You cannot use PKI certificates to authenticate users in {kib}. To use PKI in {es}, you configure a PKI realm, enable client authentication on the desired network layers (transport or http), and map the Distinguished Names -(DNs) from the user certificates to {security} roles in the role mapping file. +(DNs) from the user certificates to {security} roles in the +<> or role-mapping file. You can also use a combination of PKI and username/password authentication. For example, you can enable SSL/TLS on the transport layer and define a PKI realm to @@ -126,7 +127,7 @@ The `certificate_authorities` option can be used as an alternative to the + -- You map roles for PKI users through the -<> or by using a file stored on +<> or by using a file stored on each node. When a user authenticates against a PKI realm, the privileges for that user are the union of all privileges defined by the roles to which the user is mapped. @@ -173,4 +174,9 @@ key. You can also use the authenticate API to validate your role mapping. For more information, see {xpack-ref}/mapping-roles.html[Mapping Users and Groups to Roles]. --- \ No newline at end of file + +NOTE: The PKI realm supports +{stack-ov}/realm-chains.html#authorization_realms[authorization realms] as an +alternative to role mapping. + +-- diff --git a/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc index cbcbeebb359ef..d16e13025509d 100644 --- a/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc @@ -219,6 +219,11 @@ access any data. Your SAML users cannot do anything until they are mapped to {security} roles. See {stack-ov}/saml-role-mapping.html[Configuring role mappings]. + +NOTE: The SAML realm supports +{stack-ov}/realm-chains.html#authorization_realms[authorization realms] as an +alternative to role mapping. + -- . {stack-ov}/saml-kibana.html[Configure {kib} to use SAML SSO]. diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index 7139f4f81987d..b0077dc1ba9d4 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -76,12 +76,13 @@ binding. There are five configuration steps to enable SAML authentication in {es}: -. Enable SSL/TLS for HTTP -. Enable the Token Service -. Create one or more SAML realms -. Configure role mappings +. <> +. <> +. <> +. <> . Generate a SAML Metadata file for use by your Identity Provider _(optional)_ +[[saml-enable-http]] ==== Enable TLS for HTTP If your {es} cluster is operating in production mode, then you must @@ -91,6 +92,7 @@ authentication. For more information, see {ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications]. +[[saml-enable-token]] ==== Enable the token service The {es} SAML implementation makes use of the {es} Token Service. This service @@ -356,6 +358,35 @@ address such as `admin@staff.example.com.attacker.net`. 
It is important that you make sure your regular expressions are as precise as possible so that you do not inadvertently open an avenue for user impersonation attacks. +[[req-authn-context]] +==== Requesting specific authentication methods + +It is sometimes necessary for a SAML SP to be able to impose specific +restrictions regarding the authentication that will take place at an IdP, +in order to assess the level of confidence that it can place in +the corresponding authentication response. The restrictions might have to do +with the authentication method (password, client certificates, etc), the +user identification method during registration, and other details. {es} implements +https://docs.oasis-open.org/security/saml/v2.0/saml-authn-context-2.0-os.pdf[SAML 2.0 Authentication Context], which can be used for this purpose as defined in SAML 2.0 Core +Specification. + +In short, the SAML SP defines a set of Authentication Context Class Reference +values, which describe the restrictions to be imposed on the IdP, and sends these +in the Authentication Request. The IdP attempts to grant these restrictions. +If it cannot grant them, the authentication attempt fails. If the user is +successfully authenticated, the Authentication Statement of the SAML Response +contains an indication of the restrictions that were satisfied. + +You can define the Authentication Context Class Reference values by using the `req_authn_context_class_ref` option in the SAML realm configuration. See +{ref}/security-settings.html#ref-saml-settings[SAML realm settings]. + +{es} supports only the `exact` comparison method for the Authentication Context. +When it receives the Authentication Response from the IdP, {es} examines the +value of the Authentication Context Class Reference that is part of the +Authentication Statement of the SAML Assertion. If it matches one of the +requested values, the authentication is considered successful. Otherwise, the +authentication attempt fails. + [[saml-logout]] ==== SAML logout @@ -442,7 +473,7 @@ or separate keys used for each of those. The Elastic Stack uses X.509 certificates with RSA private keys for SAML cryptography. These keys can be generated using any standard SSL tool, including -the `elasticsearch-certutil` tool that ships with X-Pack. +the `elasticsearch-certutil` tool that ships with {xpack}. Your IdP may require that the Elastic Stack have a cryptographic key for signing SAML messages, and that you provide the corresponding signing certificate within @@ -573,6 +604,7 @@ The passphrase for the keystore, if the file is encypted. This is a {ref}/secure-settings.html[secure setting] that must be set with the `elasticsearch-keystore` tool. +[[saml-sp-metadata]] === Generating SP metadata Some Identity Providers support importing a metadata file from the Service @@ -592,9 +624,10 @@ When a user authenticates using SAML, they are identified to the Elastic Stack, but this does not automatically grant them access to perform any actions or access any data. -Your SAML users cannot do anything until they are mapped to X-Pack Security -roles. This mapping is performed through the -{ref}/security-api-role-mapping.html[role-mapping API] +Your SAML users cannot do anything until they are assigned {security} +roles. This is done through either the +{ref}/security-api-put-role-mapping.html[add role mapping API], or with +<>. 
This is an example of a simple role mapping that grants the `kibana_user` role to any user who authenticates against the `saml1` realm: @@ -626,7 +659,7 @@ mapping are derived from the SAML attributes as follows: - `metadata`: See <> For more information, see <> and -{ref}/security-api-role-mapping.html[Role Mapping APIs]. +{ref}/security-api.html#security-role-mapping-apis[role mapping APIs]. If your IdP has the ability to provide groups or roles to Service Providers, then you should map this SAML attribute to the `attributes.groups` setting in @@ -651,6 +684,18 @@ PUT /_xpack/security/role_mapping/saml-finance // CONSOLE // TEST +If your users also exist in a repository that can be directly accessed by {security} +(such as an LDAP directory) then you can use +<> instead of role mappings. + +In this case, you perform the following steps: +1. In your SAML realm, assigned a SAML attribute to act as the lookup userid, + by configuring the `attributes.principal` setting. +2. Create a new realm that can lookup users from your local repository (e.g. an + `ldap` realm) +3. In your SAML realm, set `authorization_realms` to the name of the realm you + created in step 2. + [[saml-user-metadata]] === User metadata diff --git a/x-pack/docs/en/security/authentication/user-cache.asciidoc b/x-pack/docs/en/security/authentication/user-cache.asciidoc index 36af070bf067b..716e7af99145c 100644 --- a/x-pack/docs/en/security/authentication/user-cache.asciidoc +++ b/x-pack/docs/en/security/authentication/user-cache.asciidoc @@ -12,27 +12,8 @@ object to avoid unnecessarily needing to perform role mapping on each request. The cached user credentials are hashed in memory. By default, {security} uses a salted `sha-256` hash algorithm. You can use a different hashing algorithm by -setting the `cache_hash_algo` setting to any of the following: - -[[cache-hash-algo]] -.Cache hash algorithms -|======================= -| Algorithm | | | Description -| `ssha256` | | | Uses a salted `sha-256` algorithm (default). -| `md5` | | | Uses `MD5` algorithm. -| `sha1` | | | Uses `SHA1` algorithm. -| `bcrypt` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds. -| `bcrypt4` | | | Uses `bcrypt` algorithm with salt generated in 16 rounds. -| `bcrypt5` | | | Uses `bcrypt` algorithm with salt generated in 32 rounds. -| `bcrypt6` | | | Uses `bcrypt` algorithm with salt generated in 64 rounds. -| `bcrypt7` | | | Uses `bcrypt` algorithm with salt generated in 128 rounds. -| `bcrypt8` | | | Uses `bcrypt` algorithm with salt generated in 256 rounds. -| `bcrypt9` | | | Uses `bcrypt` algorithm with salt generated in 512 rounds. -| `noop`,`clear_text` | | | Doesn't hash the credentials and keeps it in clear text in - memory. CAUTION: keeping clear text is considered insecure - and can be compromised at the OS level (for example through - memory dumps and using `ptrace`). -|======================= +setting the `cache.hash_algo` realm settings. See +{ref}/security-settings.html#hashing-settings[User cache and password hash algorithms]. [[cache-eviction-api]] ==== Evicting users from the cache diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc index b8a01aa4519c6..7b30284f58365 100644 --- a/x-pack/docs/en/security/authorization/managing-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc @@ -9,7 +9,10 @@ A role is defined by the following JSON structure: { "run_as": [ ... ], <1> "cluster": [ ... 
], <2> - "indices": [ ... ] <3> + "global": { ... }, <3> + "indices": [ ... ], <4> + "applications": [ ... ] <5> + } ----- // NOTCONSOLE @@ -19,8 +22,15 @@ A role is defined by the following JSON structure: cluster level actions users with this role are able to execute. This field is optional (missing `cluster` privileges effectively mean no cluster level permissions). -<3> A list of indices permissions entries. This field is optional (missing `indices` +<3> An object defining global privileges. A global privilege is a form of + cluster privilege that is request sensitive. A standard cluster privilege + makes authorization decisions based solely on the action being executed. + A global privilege also considers the parameters included in the request. + Support for global privileges is currently limited to the management of + application privileges. This field is optional. +<4> A list of indices permissions entries. This field is optional (missing `indices` privileges effectively mean no index level permissions). +<5> A list of application privilege entries. This field is optional. [[valid-role-name]] NOTE: Role names must be at least 1 and no more than 1024 characters. They can @@ -28,6 +38,9 @@ NOTE: Role names must be at least 1 and no more than 1024 characters. They can punctuation, and printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. Leading or trailing whitespace is not allowed. +[[roles-indices-priv]] +==== Indices Privileges + The following describes the structure of an indices permissions entry: [source,js] @@ -77,8 +90,60 @@ names or regular expressions that refer to multiple indices. ------------------------------------------------------------------------------ ============================================================================== -The following snippet shows an example definition of a `clicks_admin` role: +[[roles-global-priv]] +==== Global Privileges +The following describes the structure of a global privileges entry: + +[source,js] +------- +{ + "application": { + "manage": { <1> + "applications": [ ... ] <2> + } + } +} +------- +// NOTCONSOLE + +<1> The only supported global privilege is the ability to manage application + privileges +<2> The list of application names that may be managed. This list supports + wildcards (e.g. `"myapp-*"`) and regular expressions (e.g. + `"/app[0-9]*/"`) + +[[roles-application-priv]] +==== Application Privileges +The following describes the structure of an application privileges entry: +[source,js] +------- +{ + "application": "my_app", <1> + "privileges": [ ... ], <2> + "resources": [ ... ] <3> +} +------- +// NOTCONSOLE + +<1> The name of the application. +<2> The list of the names of the application privileges to grant to this role. +<3> The resources to which those privileges apply. These are handled in the same + way as index name pattern in `indices` permissions. These resources do not + have any special meaning to {security}. + +For details about the validation rules for these fields, see the +{ref}/security-api-put-privileges.html[add application privileges API]. + +A role may refer to application privileges that do not exist - that is, they +have not yet been defined through the add application privileges API (or they +were defined, but have since been deleted). In this case, the privilege has +no effect, and will not grant any actions in the +{ref}/security-api-has-privileges.html[has privileges API]. 
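Putting the two structures above together, a role document can carry both a global privileges entry and an application privileges entry. The following sketch is illustrative only; it reuses the `myapp` application and `read` privilege from the add application privileges example earlier, and the wildcard patterns are placeholders:

[source,js]
-------
{
  "global": {
    "application": {
      "manage": {
        "applications": [ "myapp-*" ]
      }
    }
  },
  "applications": [
    {
      "application": "myapp",
      "privileges": [ "read" ],
      "resources": [ "product/*" ]
    }
  ]
}
-------
// NOTCONSOLE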
+ +==== Example + +The following snippet shows an example definition of a `clicks_admin` role: [source,js] ----------- POST /_xpack/security/role/clicks_admin @@ -130,7 +195,7 @@ manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*. The _Role Management APIs_ enable you to add, update, remove and retrieve roles dynamically. When you use the APIs to manage roles in the `native` realm, the roles are stored in an internal {es} index. For more information and examples, -see {ref}/security-api-roles.html[Role Management APIs]. +see {ref}/security-api.html#security-role-apis[role management APIs]. [float] [[roles-management-file]] diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index cf8373a65f335..166238c32ac51 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -18,17 +18,20 @@ the API, and other roles that are mapped through files. When you use role-mappings, you assign existing roles to users. The available roles should either be added using the -{ref}/security-api-roles.html[Role Management APIs] or defined in the +{ref}/security-api.html#security-role-apis[role management APIs] or defined in the <>. Either role-mapping method can use either role management method. For example, when you use the role mapping API, you are able to map users to both API-managed roles and file-managed roles (and likewise for file-based role-mappings). +NOTE: The PKI, LDAP, Kerberos and SAML realms support using +<> as an alternative to role mapping. + [[mapping-roles-api]] ==== Using the role mapping API You can define role-mappings through the -{ref}/security-api-role-mapping.html[role mapping API]. +{ref}/security-api-put-role-mapping.html[add role mapping API]. [[mapping-roles-file]] ==== Using role mapping files diff --git a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc index 93d11c0ab2af9..8dba764cc1cb1 100644 --- a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc +++ b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc @@ -12,7 +12,7 @@ the realm you use to authenticate. Both the internal `native` and `file` realms support this out of the box. The LDAP realm must be configured to run in <>. The Active Directory realm must be <> to support -_run as_. The PKI realm does not support _run as_. +_run as_. The PKI, Kerberos, and SAML realms do not support _run as_. To submit requests on behalf of other users, you need to have the `run_as` permission. For example, the following role grants permission to submit request diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index 5e8f1adbc7aa8..5fd9ed610cb3e 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -9,7 +9,7 @@ password-protect your data as well as implement more advanced security measures such as encrypting communications, role-based access control, IP filtering, and auditing. For more information, see -{xpack-ref}/xpack-security.html[Securing the Elastic Stack]. +{xpack-ref}/elasticsearch-security.html[Securing the Elastic Stack]. To use {security} in {es}: @@ -27,6 +27,9 @@ https://www.elastic.co/subscriptions and your cluster. If you are using a trial license, the default value is `false`. 
For more information, see {ref}/security-settings.html[Security Settings in {es}].
+. If you plan to run {es} in a Federal Information Processing Standard (FIPS)
+140-2 enabled JVM, see <>.
+ . Configure Transport Layer Security (TLS/SSL) for internode-communication. + -- @@ -52,8 +55,8 @@ help you get up and running. The +elasticsearch-setup-passwords+ command is the simplest method to set the built-in users' passwords for the first time. For example, you can run the command in an "interactive" mode, which prompts you
-to enter new passwords for the `elastic`, `kibana`, `beats_system`, and
-`logstash_system` users:
+to enter new passwords for the `elastic`, `kibana`, `beats_system`,
+`logstash_system`, and `apm_system` users:
[source,shell] -------------------------------------------------- @@ -77,6 +80,7 @@ user API. ** <>. ** <>. ** <>.
+** <>.
. Set up roles and users to control access to {es}. For example, to grant _John Doe_ full access to all indices that match @@ -142,5 +146,8 @@ include::authentication/configuring-ldap-realm.asciidoc[] include::authentication/configuring-native-realm.asciidoc[] include::authentication/configuring-pki-realm.asciidoc[] include::authentication/configuring-saml-realm.asciidoc[]
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc
+include::authentication/configuring-kerberos-realm.asciidoc[]
+include::fips-140-compliance.asciidoc[]
include::{es-repo-dir}/settings/security-settings.asciidoc[] include::{es-repo-dir}/settings/audit-settings.asciidoc[] diff --git a/x-pack/docs/en/security/fips-140-compliance.asciidoc b/x-pack/docs/en/security/fips-140-compliance.asciidoc new file mode 100644 index 0000000000000..ceb605c2e2db1 --- /dev/null +++ b/x-pack/docs/en/security/fips-140-compliance.asciidoc @@ -0,0 +1,128 @@
+[role="xpack"]
+[[fips-140-compliance]]
+=== FIPS 140-2
+
+The Federal Information Processing Standard (FIPS) Publication 140-2 (FIPS PUB
+140-2), titled "Security Requirements for Cryptographic Modules", is a U.S.
+government computer security standard used to approve cryptographic modules.
+{es} offers a FIPS 140-2 compliant mode and as such can run in a FIPS 140-2
+enabled JVM. To run {es} in FIPS mode, you must set
+`xpack.security.fips_mode.enabled` to `true` in `elasticsearch.yml`.
+
+For {es}, FIPS 140-2 compliance is ensured by:
+
+- Using FIPS approved / NIST recommended cryptographic algorithms.
+- Delegating the implementation of these cryptographic algorithms to a NIST
+  validated cryptographic module (available via the Java Security Provider
+  in use in the JVM).
+- Allowing the configuration of {es} in a FIPS 140-2 compliant manner, as
+  documented below.
+
+[float]
+=== Upgrade considerations
+
+If you plan to upgrade your existing cluster to a version that can be run in
+a FIPS 140-2 enabled JVM, the suggested approach is to first perform a rolling
+upgrade to the new version in your existing JVM and perform all necessary
+configuration changes in preparation for running in FIPS mode (a sketch of such
+settings follows the list below). You can then
+perform a rolling restart of the nodes, this time starting each node in the FIPS
+140-2 JVM. This will allow {es} to take care of a couple of things automatically for you:
+
+- <> will be upgraded to the latest format version as
+  previous format versions cannot be loaded in a FIPS 140-2 JVM.
+- Self-generated trial licenses will be upgraded to the latest format that
+  is compliant with FIPS 140-2.
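For example, the preparatory configuration changes mentioned above might include `elasticsearch.yml`
entries along the lines of the following sketch. The realm name `realm1` and the `pbkdf2` values are
illustrative assumptions; the individual settings are discussed in the sections below.

[source,yaml]
----------------
# Illustrative only: enable FIPS mode and switch to a PBKDF2-based hash,
# as discussed in the Password Hashing section below.
xpack.security.fips_mode.enabled: true
xpack.security.authc.password_hashing.algorithm: pbkdf2
xpack.security.authc.realms.realm1.cache.hash_algo: pbkdf2
----------------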
+
+If you are on an appropriate license level (platinum), you can elect to perform
+a rolling upgrade while at the same time running each upgraded node in a
+FIPS 140-2 JVM. In this case, you would need to also regenerate your
+`elasticsearch.keystore` and migrate all secure settings to it, in addition to the
+necessary configuration changes outlined below, before starting each node.
+
+[float]
+=== Configuring {es} for FIPS 140-2
+
+Apart from setting `xpack.security.fips_mode.enabled`, a number of security-related
+settings need to be configured accordingly in order to be compliant
+and able to run {es} successfully in a FIPS 140-2 enabled JVM.
+
+[float]
+==== TLS
+
+SSLv2 and SSLv3 are not allowed by FIPS 140-2, so `SSLv2Hello` and `SSLv3` cannot
+be used for <>.
+
+NOTE: The use of TLS ciphers is mainly governed by the relevant crypto module
+(the FIPS Approved Security Provider that your JVM uses). All the ciphers that
+are configured by default in {es} are FIPS 140-2 compliant and as such can be
+used in a FIPS 140-2 JVM. (see <>)
+
+[float]
+==== TLS Keystores and keys
+
+Keystores can be used in a number of <> in order to
+conveniently store key and trust material. However, neither `JKS` nor `PKCS#12`
+keystores can be used in a FIPS 140-2 enabled JVM, so you must refrain from using
+these keystores. Your FIPS 140-2 provider may provide a compliant keystore that
+can be used, or you can use PEM encoded files. To use PEM encoded key material,
+you can use the relevant `\*.key` and `*.certificate` configuration
+options, and for trust material you can use `*.certificate_authorities`.
+
+
+FIPS 140-2 compliance dictates that the length of the public keys used for TLS
+must correspond to the strength of the symmetric key algorithm in use in TLS.
+Depending on the value of <> that
+you select to use, the TLS keys must have a corresponding length according to
+the following table:
+
+[[comparable-key-strength]]
+.Comparable key strengths
+|=======================
+| Symmetric Key Algorithm | RSA key length | ECC key length
+| `3DES` | 2048 | 224-255
+| `AES-128` | 3072 | 256-383
+| `AES-256` | 15360 | 512+
+|=======================
+
+[float]
+==== Password Hashing
+
+{es} offers a number of algorithms for securely hashing credentials in memory and
+on disk. However, only the `PBKDF2` family of algorithms is compliant with FIPS
+140-2 for password hashing. You must set the `cache.hash_algo` realm setting
+and the `xpack.security.authc.password_hashing.algorithm` setting to one of the
+available `PBKDF2` values.
+See <>.
+
+Password hashing configuration changes are not retroactive, so the stored hashed
+credentials of existing users of the file and native realms will not be updated
+on disk.
+Authentication will still work, but in order to ensure FIPS 140-2 compliance,
+you would need to recreate users or change their passwords using the
+<> CLI tool for the file realm and the
+<> for the native realm.
+
+The user cache will be emptied upon node restart, so any existing hashes using
+non-compliant algorithms will be discarded and the new ones will be created
+using the compliant `PBKDF2` algorithm you have selected.
+
+[float]
+=== Limitations
+
+Due to the limitations that FIPS 140-2 compliance enforces, a small number of
+features are not available while running in FIPS mode. The list is as follows:
+
+* Azure Classic Discovery Plugin
+* Ingest Attachment Plugin
+* The {ref}/certutil.html[`elasticsearch-certutil`] tool.
However,
+  `elasticsearch-certutil` can be used in a non FIPS 140-2
+  enabled JVM (by pointing the `JAVA_HOME` environment variable to a different Java
+  installation) in order to generate the keys and certificates that
+  can later be used in the FIPS 140-2 enabled JVM.
+* The `elasticsearch-plugin` tool. Similarly, `elasticsearch-plugin` can be
+  used with a different (non FIPS 140-2 enabled) Java installation if
+  available.
+* The SQL CLI client cannot run in a FIPS 140-2 enabled JVM while using
+  TLS for transport security or PKI for client authentication.
+* The SAML Realm cannot decrypt and consume encrypted Assertions or encrypted
+  attributes in Attribute Statements from the SAML IdP.
\ No newline at end of file
diff --git a/x-pack/docs/en/security/securing-communications/tls-http.asciidoc b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc index eb8e985a65b59..06e70b036735e 100644 --- a/x-pack/docs/en/security/securing-communications/tls-http.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc @@ -77,7 +77,17 @@ bin/elasticsearch-keystore add xpack.security.http.ssl.secure_key_passphrase . Restart {es}.
-NOTE: All TLS-related node settings are considered to be highly sensitive and
+[NOTE]
+===============================
+* All TLS-related node settings are considered to be highly sensitive and
therefore are not exposed via the {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API] For more information about any of these settings, see <>.
+
+* {es} monitors all files such as certificates, keys, keystores, or truststores
+that are configured as values of TLS-related node settings. If you update any of
+these files (for example, when your hostnames change or your certificates are
+due to expire), {es} reloads them. The files are polled for changes at
+a frequency determined by the global {es} `resource.reload.interval.high`
+setting, which defaults to 5 seconds.
+===============================
diff --git a/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc index c186aebbe2433..c2306545536aa 100644 --- a/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc @@ -95,7 +95,17 @@ vice-versa). After enabling TLS you must restart all nodes in order to maintain communication across the cluster. --
-NOTE: All TLS-related node settings are considered to be highly sensitive and
+[NOTE]
+===============================
+* All TLS-related node settings are considered to be highly sensitive and
therefore are not exposed via the {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API] For more information about any of these settings, see <>.
+
+* {es} monitors all files such as certificates, keys, keystores, or truststores
+that are configured as values of TLS-related node settings. If you update any of
+these files (for example, when your hostnames change or your certificates are
+due to expire), {es} reloads them. The files are polled for changes at
+a frequency determined by the global {es} `resource.reload.interval.high`
+setting, which defaults to 5 seconds.
+===============================
diff --git a/x-pack/docs/en/security/troubleshooting.asciidoc b/x-pack/docs/en/security/troubleshooting.asciidoc index d1c88b2786f1e..72a05ada29958 100644 --- a/x-pack/docs/en/security/troubleshooting.asciidoc +++ b/x-pack/docs/en/security/troubleshooting.asciidoc @@ -15,6 +15,7 @@ answers for frequently asked questions. * <> * <> * <>
+* <>
* <> * <> @@ -319,6 +320,77 @@ In this case, you must install the <>. --
+[[trb-security-kerberos]]
+=== Common Kerberos exceptions
+
+*Symptoms:*
+
+* User authentication fails due to either GSS negotiation failure
+or a service login failure (either on the server or in the {es} HTTP client).
+Some of the common exceptions are listed below with some tips to help resolve
+them.
+
+*Resolution:*
+
+`Failure unspecified at GSS-API level (Mechanism level: Checksum failed)`::
++
+--
+
+When you see this error message on the HTTP client side, then it may be
+related to an incorrect password.
+
+When you see this error message in the {es} server logs, then it may be
+related to the {es} service keytab. The keytab file is present, but {es} could not
+use it to log in as the service user. Please check the keytab expiry. Also check whether the
+keytab contains up-to-date credentials; if not, replace them.
+
+You can use tools like `klist` or `ktab` to list principals inside
+the keytab and validate them. You can use `kinit` to see if you can acquire
+initial tickets using the keytab. Please check the tools and their documentation
+in your Kerberos environment.
+
+Kerberos depends on proper hostname resolution, so please check your DNS infrastructure.
+Incorrect DNS setup, DNS SRV records, or configuration for KDC servers in `krb5.conf`
+can cause problems with hostname resolution.
+
+--
+
+`Failure unspecified at GSS-API level (Mechanism level: Request is a replay (34))`::
+
+`Failure unspecified at GSS-API level (Mechanism level: Clock skew too great (37))`::
++
+--
+
+To prevent replay attacks, Kerberos V5 sets a maximum tolerance for computer
+clock synchronization, which is typically 5 minutes. Please check whether
+the time on the machines within the domain is in sync.
+
+--
+
+Kerberos logs are often cryptic in nature, and many things can go wrong
+because Kerberos depends on external services such as DNS and NTP. You might
+have to enable additional debug logs to determine the root cause of the issue.
+
+{es} uses a JAAS (Java Authentication and Authorization Service) Kerberos login
+module to provide Kerberos support. To enable debug logs on {es} for the login
+module, use the following Kerberos realm setting:
+[source,yaml]
+----------------
+xpack.security.authc.realms..krb.debug: true
+----------------
+
+For detailed information, see {ref}/security-settings.html#ref-kerberos-settings[Kerberos realm settings].
+
+Sometimes you may need to go deeper to understand the problem during SPNEGO
+GSS context negotiation or look at the Kerberos message exchange. To enable
+Kerberos/SPNEGO debug logging on the JVM, add the following JVM system properties:
+
+`-Dsun.security.krb5.debug=true`
+
+`-Dsun.security.spnego.debug=true`
+
+For more information about JVM system properties, see {ref}/jvm-options.html[configuring JVM options].
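One way to apply these properties, assuming you manage JVM flags through the `config/jvm.options`
file rather than passing them on the command line, is to append them there and restart the node:

[source,sh]
----------------
# Illustrative addition to config/jvm.options; a node restart is required for
# the flags to take effect.
-Dsun.security.krb5.debug=true
-Dsun.security.spnego.debug=true
----------------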
+ [[trb-security-internalserver]] === Internal Server Error in Kibana diff --git a/x-pack/license-tools/build.gradle b/x-pack/license-tools/build.gradle index 183b9ab50e03b..4bd17713a2fea 100644 --- a/x-pack/license-tools/build.gradle +++ b/x-pack/license-tools/build.gradle @@ -1,7 +1,7 @@ apply plugin: 'elasticsearch.build' dependencies { - compile project(path: xpackModule('core'), configuration: 'shadow') + compile "org.elasticsearch.plugin:x-pack-core:${version}" compile "org.elasticsearch:elasticsearch:${version}" testCompile "org.elasticsearch.test:framework:${version}" } diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle index a430745db06f5..0b1f889a2c121 100644 --- a/x-pack/plugin/ccr/build.gradle +++ b/x-pack/plugin/ccr/build.gradle @@ -32,13 +32,22 @@ task internalClusterTest(type: RandomizedTestingTask, systemProperty 'es.set.netty.runtime.available.processors', 'false' } +check.dependsOn internalClusterTest internalClusterTest.mustRunAfter test -check.dependsOn(internalClusterTest, 'qa:multi-cluster:followClusterTest', 'qa:multi-cluster-with-security:followClusterTest') + +// add all sub-projects of the qa sub-project +gradle.projectsEvaluated { + project.subprojects + .find { it.path == project.path + ":qa" } + .subprojects + .findAll { it.path.startsWith(project.path + ":qa") } + .each { check.dependsOn it.check } +} dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/build.gradle new file mode 100644 index 0000000000000..97d4008eb8c1f --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/build.gradle @@ -0,0 +1,40 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('ccr'), configuration: 'runtime') +} + +task leaderClusterTest(type: RestIntegTestTask) { + mustRunAfter(precommit) +} + +leaderClusterTestCluster { + numNodes = 1 + clusterName = 'leader-cluster' +} + +leaderClusterTestRunner { + systemProperty 'tests.is_leader_cluster', 'true' +} + +task followClusterTest(type: RestIntegTestTask) {} + +followClusterTestCluster { + dependsOn leaderClusterTestRunner + numNodes = 1 + clusterName = 'follow-cluster' + setting 'xpack.license.self_generated.type', 'trial' + setting 'search.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" +} + +followClusterTestRunner { + systemProperty 'tests.is_leader_cluster', 'false' + systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + finalizedBy 'leaderClusterTestCluster#stop' +} + +check.dependsOn followClusterTest +test.enabled = false diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java new file mode 100644 index 0000000000000..06d9f91c7abb7 --- /dev/null +++ 
b/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.util.Locale; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; + +public class CcrMultiClusterLicenseIT extends ESRestTestCase { + + private final boolean runningAgainstLeaderCluster = Booleans.parseBoolean(System.getProperty("tests.is_leader_cluster")); + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + public void testFollowIndex() { + if (runningAgainstLeaderCluster == false) { + final Request request = new Request("POST", "/follower/_ccr/follow"); + request.setJsonEntity("{\"leader_index\": \"leader_cluster:leader\"}"); + assertLicenseIncompatible(request); + } + } + + public void testCreateAndFollowIndex() { + if (runningAgainstLeaderCluster == false) { + final Request request = new Request("POST", "/follower/_ccr/create_and_follow"); + request.setJsonEntity("{\"leader_index\": \"leader_cluster:leader\"}"); + assertLicenseIncompatible(request); + } + } + + private static void assertLicenseIncompatible(final Request request) { + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + final String expected = String.format( + Locale.ROOT, + "can not fetch remote index [%s] metadata as the remote cluster [%s] is not licensed for [ccr]; " + + "the license mode [BASIC] on cluster [%s] does not enable [ccr]", + "leader_cluster:leader", + "leader_cluster", + "leader_cluster"); + assertThat(e, hasToString(containsString(expected))); + } + +} diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle index 97c019d4c73a4..897aed0110e17 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle @@ -3,7 +3,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.standalone-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('ccr'), configuration: 'runtime') } @@ -71,4 +71,5 @@ followClusterTestRunner { finalizedBy 'leaderClusterTestCluster#stop' } +check.dependsOn followClusterTest test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle index 2ee0f9c1b45c8..cc726e1a65257 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -3,7 +3,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.standalone-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile project(path: 
xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('ccr'), configuration: 'runtime') } @@ -14,6 +14,7 @@ task leaderClusterTest(type: RestIntegTestTask) { leaderClusterTestCluster { numNodes = 1 clusterName = 'leader-cluster' + setting 'xpack.license.self_generated.type', 'trial' } leaderClusterTestRunner { @@ -26,6 +27,7 @@ followClusterTestCluster { dependsOn leaderClusterTestRunner numNodes = 1 clusterName = 'follow-cluster' + setting 'xpack.license.self_generated.type', 'trial' setting 'search.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" } @@ -35,4 +37,5 @@ followClusterTestRunner { finalizedBy 'leaderClusterTestCluster#stop' } +check.dependsOn followClusterTest test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index dedf3ea0f7555..b00883f5c2af2 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -20,6 +20,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.license.XPackLicenseState; @@ -31,10 +33,12 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.ccr.action.CcrStatsAction; import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction; import org.elasticsearch.xpack.ccr.action.FollowIndexAction; @@ -54,8 +58,10 @@ import org.elasticsearch.xpack.core.XPackPlugin; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Objects; import java.util.Optional; import java.util.function.Supplier; @@ -72,15 +78,42 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E private final boolean enabled; private final Settings settings; + private final CcrLicenseChecker ccrLicenseChecker; /** * Construct an instance of the CCR container with the specified settings. * * @param settings the settings */ + @SuppressWarnings("unused") // constructed reflectively by the plugin infrastructure public Ccr(final Settings settings) { + this(settings, new CcrLicenseChecker()); + } + + /** + * Construct an instance of the CCR container with the specified settings and license checker. 
+ * + * @param settings the settings + * @param ccrLicenseChecker the CCR license checker + */ + Ccr(final Settings settings, final CcrLicenseChecker ccrLicenseChecker) { this.settings = settings; this.enabled = CCR_ENABLED_SETTING.get(settings); + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + } + + @Override + public Collection createComponents( + final Client client, + final ClusterService clusterService, + final ThreadPool threadPool, + final ResourceWatcherService resourceWatcherService, + final ScriptService scriptService, + final NamedXContentRegistry xContentRegistry, + final Environment environment, + final NodeEnvironment nodeEnvironment, + final NamedWriteableRegistry namedWriteableRegistry) { + return Collections.singleton(ccrLicenseChecker); } @Override @@ -121,7 +154,7 @@ public List getNamedWriteables() { ShardFollowTask::new), // Task statuses - new NamedWriteableRegistry.Entry(Task.Status.class, ShardFollowNodeTask.Status.NAME, + new NamedWriteableRegistry.Entry(Task.Status.class, ShardFollowNodeTask.Status.STATUS_PARSER_NAME, ShardFollowNodeTask.Status::new) ); } @@ -133,9 +166,10 @@ public List getNamedXContent() { ShardFollowTask::fromXContent), // Task statuses - new NamedXContentRegistry.Entry(ShardFollowNodeTask.Status.class, new ParseField(ShardFollowNodeTask.Status.NAME), - ShardFollowNodeTask.Status::fromXContent) - ); + new NamedXContentRegistry.Entry( + ShardFollowNodeTask.Status.class, + new ParseField(ShardFollowNodeTask.Status.STATUS_PARSER_NAME), + ShardFollowNodeTask.Status::fromXContent)); } /** diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java new file mode 100644 index 0000000000000..cefa490f4f7e2 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.license.RemoteClusterLicenseChecker; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.util.Collections; +import java.util.Locale; +import java.util.Objects; +import java.util.function.BooleanSupplier; +import java.util.function.Consumer; + +/** + * Encapsulates licensing checking for CCR. + */ +public final class CcrLicenseChecker { + + private final BooleanSupplier isCcrAllowed; + + /** + * Constructs a CCR license checker with the default rule based on the license state for checking if CCR is allowed. + */ + CcrLicenseChecker() { + this(XPackPlugin.getSharedLicenseState()::isCcrAllowed); + } + + /** + * Constructs a CCR license checker with the specified boolean supplier. 
+ * + * @param isCcrAllowed a boolean supplier that should return true if CCR is allowed and false otherwise + */ + CcrLicenseChecker(final BooleanSupplier isCcrAllowed) { + this.isCcrAllowed = Objects.requireNonNull(isCcrAllowed); + } + + /** + * Returns whether or not CCR is allowed. + * + * @return true if CCR is allowed, otherwise false + */ + public boolean isCcrAllowed() { + return isCcrAllowed.getAsBoolean(); + } + + /** + * Fetches the leader index metadata from the remote cluster. Before fetching the index metadata, the remote cluster is checked for + * license compatibility with CCR. If the remote cluster is not licensed for CCR, the {@link ActionListener#onFailure(Exception)} method + * of the specified listener is invoked. Otherwise, the specified consumer is invoked with the leader index metadata fetched from the + * remote cluster. + * + * @param client the client + * @param clusterAlias the remote cluster alias + * @param leaderIndex the name of the leader index + * @param listener the listener + * @param leaderIndexMetadataConsumer the leader index metadata consumer + * @param the type of response the listener is waiting for + */ + public void checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( + final Client client, + final String clusterAlias, + final String leaderIndex, + final ActionListener listener, + final Consumer leaderIndexMetadataConsumer) { + // we have to check the license on the remote cluster + new RemoteClusterLicenseChecker(client, XPackLicenseState::isCcrAllowedForOperationMode).checkRemoteClusterLicenses( + Collections.singletonList(clusterAlias), + new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck licenseCheck) { + if (licenseCheck.isSuccess()) { + final Client remoteClient = client.getRemoteClusterClient(clusterAlias); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.clear(); + clusterStateRequest.metaData(true); + clusterStateRequest.indices(leaderIndex); + final ActionListener clusterStateListener = ActionListener.wrap( + r -> { + final ClusterState remoteClusterState = r.getState(); + final IndexMetaData leaderIndexMetadata = + remoteClusterState.getMetaData().index(leaderIndex); + leaderIndexMetadataConsumer.accept(leaderIndexMetadata); + }, + listener::onFailure); + // following an index in remote cluster, so use remote client to fetch leader index metadata + remoteClient.admin().cluster().state(clusterStateRequest, clusterStateListener); + } else { + listener.onFailure(incompatibleRemoteLicense(leaderIndex, licenseCheck)); + } + } + + @Override + public void onFailure(final Exception e) { + listener.onFailure(unknownRemoteLicense(leaderIndex, clusterAlias, e)); + } + + }); + } + + private static ElasticsearchStatusException incompatibleRemoteLicense( + final String leaderIndex, final RemoteClusterLicenseChecker.LicenseCheck licenseCheck) { + final String clusterAlias = licenseCheck.remoteClusterLicenseInfo().clusterAlias(); + final String message = String.format( + Locale.ROOT, + "can not fetch remote index [%s:%s] metadata as the remote cluster [%s] is not licensed for [ccr]; %s", + clusterAlias, + leaderIndex, + clusterAlias, + RemoteClusterLicenseChecker.buildErrorMessage( + "ccr", + licenseCheck.remoteClusterLicenseInfo(), + RemoteClusterLicenseChecker::isLicensePlatinumOrTrial)); + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST); + } + + private static ElasticsearchStatusException unknownRemoteLicense( + 
final String leaderIndex, final String clusterAlias, final Exception cause) { + final String message = String.format( + Locale.ROOT, + "can not fetch remote index [%s:%s] metadata as the license state of the remote cluster [%s] could not be determined", + clusterAlias, + leaderIndex, + clusterAlias); + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, cause); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java index eaded2456306a..b5d6697fc73c2 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java @@ -40,7 +40,7 @@ private CcrStatsAction() { @Override public TasksResponse newResponse() { - return null; + return new TasksResponse(); } public static class TasksResponse extends BaseTasksResponse implements ToXContentObject { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java index 1ce915fb19269..2e36bca293225 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java @@ -3,6 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.ccr.action; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; @@ -11,7 +12,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.ActiveShardsObserver; @@ -36,10 +36,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; import org.elasticsearch.xpack.ccr.CcrSettings; import java.io.IOException; @@ -185,16 +187,25 @@ public static class TransportAction extends TransportMasterNodeAction listener) throws Exception { - String[] indices = new String[]{request.getFollowRequest().getLeaderIndex()}; - Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); - if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { - // Following an index in local cluster, so use local cluster state to fetch leader IndexMetaData: - IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getFollowRequest().getLeaderIndex()); - createFollowIndex(leaderIndexMetadata, request, listener); + protected void masterOperation( + final Request request, final ClusterState state, final ActionListener 
listener) throws Exception { + if (ccrLicenseChecker.isCcrAllowed()) { + final String[] indices = new String[]{request.getFollowRequest().getLeaderIndex()}; + final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); + if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + createFollowerIndexAndFollowLocalIndex(request, state, listener); + } else { + assert remoteClusterIndices.size() == 1; + final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); + assert entry.getValue().size() == 1; + final String clusterAlias = entry.getKey(); + final String leaderIndex = entry.getValue().get(0); + createFollowerIndexAndFollowRemoteIndex(request, clusterAlias, leaderIndex, listener); + } } else { - // Following an index in remote cluster, so use remote client to fetch leader IndexMetaData: - assert remoteClusterIndices.size() == 1; - Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); - assert entry.getValue().size() == 1; - String clusterNameAlias = entry.getKey(); - String leaderIndex = entry.getValue().get(0); - - Client remoteClient = client.getRemoteClusterClient(clusterNameAlias); - ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); - clusterStateRequest.clear(); - clusterStateRequest.metaData(true); - clusterStateRequest.indices(leaderIndex); - remoteClient.admin().cluster().state(clusterStateRequest, ActionListener.wrap(r -> { - ClusterState remoteClusterState = r.getState(); - IndexMetaData leaderIndexMetadata = remoteClusterState.getMetaData().index(leaderIndex); - createFollowIndex(leaderIndexMetadata, request, listener); - }, listener::onFailure)); + listener.onFailure(LicenseUtils.newComplianceException("ccr")); } } - private void createFollowIndex(IndexMetaData leaderIndexMetaData, Request request, ActionListener listener) { + private void createFollowerIndexAndFollowLocalIndex( + final Request request, final ClusterState state, final ActionListener listener) { + // following an index in local cluster, so use local cluster state to fetch leader index metadata + final IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getFollowRequest().getLeaderIndex()); + createFollowerIndex(leaderIndexMetadata, request, listener); + } + + private void createFollowerIndexAndFollowRemoteIndex( + final Request request, + final String clusterAlias, + final String leaderIndex, + final ActionListener listener) { + ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( + client, + clusterAlias, + leaderIndex, + listener, + leaderIndexMetaData -> createFollowerIndex(leaderIndexMetaData, request, listener)); + } + + private void createFollowerIndex( + final IndexMetaData leaderIndexMetaData, final Request request, final ActionListener listener) { if (leaderIndexMetaData == null) { listener.onFailure(new IllegalArgumentException("leader index [" + request.getFollowRequest().getLeaderIndex() + "] does not exist")); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java index 5f33bcd6fcd03..17b7bbe674b38 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import 
org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -40,6 +39,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; @@ -47,6 +47,7 @@ import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; import org.elasticsearch.xpack.ccr.CcrSettings; import java.io.IOException; @@ -61,7 +62,7 @@ import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.stream.Collectors; -public class FollowIndexAction extends Action { +public class FollowIndexAction extends Action { public static final FollowIndexAction INSTANCE = new FollowIndexAction(); public static final String NAME = "cluster:admin/xpack/ccr/follow_index"; @@ -71,8 +72,8 @@ private FollowIndexAction() { } @Override - public Response newResponse() { - return new Response(); + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); } public static class Request extends ActionRequest implements ToXContentObject { @@ -128,9 +129,17 @@ public static Request fromXContent(XContentParser parser, String followerIndex) private TimeValue retryTimeout; private TimeValue idleShardRetryDelay; - public Request(String leaderIndex, String followerIndex, Integer maxBatchOperationCount, Integer maxConcurrentReadBatches, - Long maxOperationSizeInBytes, Integer maxConcurrentWriteBatches, Integer maxWriteBufferSize, - TimeValue retryTimeout, TimeValue idleShardRetryDelay) { + public Request( + String leaderIndex, + String followerIndex, + Integer maxBatchOperationCount, + Integer maxConcurrentReadBatches, + Long maxOperationSizeInBytes, + Integer maxConcurrentWriteBatches, + Integer maxWriteBufferSize, + TimeValue retryTimeout, + TimeValue idleShardRetryDelay) { + if (leaderIndex == null) { throw new IllegalArgumentException("leader_index is missing"); } @@ -270,22 +279,21 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(leaderIndex, followerIndex, maxBatchOperationCount, maxConcurrentReadBatches, maxOperationSizeInBytes, - maxConcurrentWriteBatches, maxWriteBufferSize, retryTimeout, idleShardRetryDelay); - } - } - - public static class Response extends AcknowledgedResponse { - - Response() { - } - - Response(boolean acknowledged) { - super(acknowledged); + return Objects.hash( + leaderIndex, + followerIndex, + maxBatchOperationCount, + maxConcurrentReadBatches, + maxOperationSizeInBytes, + maxConcurrentWriteBatches, + maxWriteBufferSize, + retryTimeout, + idleShardRetryDelay + ); } } - public static class TransportAction extends HandledTransportAction { + public static class TransportAction extends HandledTransportAction { private final Client client; private final ThreadPool threadPool; @@ -293,11 +301,19 @@ public static class TransportAction extends HandledTransportAction listener) { - ClusterState localClusterState = clusterService.state(); - 
IndexMetaData followIndexMetadata = localClusterState.getMetaData().index(request.followerIndex); - - String[] indices = new String[]{request.leaderIndex}; - Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); - if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { - // Following an index in local cluster, so use local cluster state to fetch leader IndexMetaData: - IndexMetaData leaderIndexMetadata = localClusterState.getMetaData().index(request.leaderIndex); - try { - start(request, null, leaderIndexMetadata, followIndexMetadata, listener); - } catch (IOException e) { - listener.onFailure(e); - return; + protected void doExecute(final Task task, + final Request request, + final ActionListener listener) { + if (ccrLicenseChecker.isCcrAllowed()) { + final String[] indices = new String[]{request.leaderIndex}; + final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); + if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + followLocalIndex(request, listener); + } else { + assert remoteClusterIndices.size() == 1; + final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); + assert entry.getValue().size() == 1; + final String clusterAlias = entry.getKey(); + final String leaderIndex = entry.getValue().get(0); + followRemoteIndex(request, clusterAlias, leaderIndex, listener); } } else { - // Following an index in remote cluster, so use remote client to fetch leader IndexMetaData: - assert remoteClusterIndices.size() == 1; - Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); - assert entry.getValue().size() == 1; - String clusterNameAlias = entry.getKey(); - String leaderIndex = entry.getValue().get(0); - - Client remoteClient = client.getRemoteClusterClient(clusterNameAlias); - ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); - clusterStateRequest.clear(); - clusterStateRequest.metaData(true); - clusterStateRequest.indices(leaderIndex); - remoteClient.admin().cluster().state(clusterStateRequest, ActionListener.wrap(r -> { - ClusterState remoteClusterState = r.getState(); - IndexMetaData leaderIndexMetadata = remoteClusterState.getMetaData().index(leaderIndex); - start(request, clusterNameAlias, leaderIndexMetadata, followIndexMetadata, listener); - }, listener::onFailure)); + listener.onFailure(LicenseUtils.newComplianceException("ccr")); } } + private void followLocalIndex(final Request request, + final ActionListener listener) { + final ClusterState state = clusterService.state(); + final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); + // following an index in local cluster, so use local cluster state to fetch leader index metadata + final IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getLeaderIndex()); + try { + start(request, null, leaderIndexMetadata, followerIndexMetadata, listener); + } catch (final IOException e) { + listener.onFailure(e); + } + } + + private void followRemoteIndex( + final Request request, + final String clusterAlias, + final String leaderIndex, + final ActionListener listener) { + final ClusterState state = clusterService.state(); + final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); + ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( + client, + clusterAlias, + leaderIndex, + listener, + leaderIndexMetadata -> { + try { + start(request, 
clusterAlias, leaderIndexMetadata, followerIndexMetadata, listener); + } catch (final IOException e) { + listener.onFailure(e); + } + }); + } + /** * Performs validation on the provided leader and follow {@link IndexMetaData} instances and then * creates a persistent task for each leader primary shard. This persistent tasks track changes in the leader @@ -354,8 +390,13 @@ protected void doExecute(Task task, Request request, ActionListener li *
  • The leader index and follow index need to have the same number of primary shards
  • * */ - void start(Request request, String clusterNameAlias, IndexMetaData leaderIndexMetadata, IndexMetaData followIndexMetadata, - ActionListener handler) throws IOException { + void start( + Request request, + String clusterNameAlias, + IndexMetaData leaderIndexMetadata, + IndexMetaData followIndexMetadata, + ActionListener handler) throws IOException { + MapperService mapperService = followIndexMetadata != null ? indicesService.createIndexMapperService(followIndexMetadata) : null; validate(request, leaderIndexMetadata, followIndexMetadata, mapperService); final int numShards = followIndexMetadata.getNumberOfShards(); @@ -403,7 +444,7 @@ void finalizeResponse() { if (error == null) { // include task ids? - handler.onResponse(new Response(true)); + handler.onResponse(new AcknowledgedResponse(true)); } else { // TODO: cancel all started tasks handler.onFailure(error); @@ -467,7 +508,9 @@ void finalizeResponse() { WHITELISTED_SETTINGS = Collections.unmodifiableSet(whiteListedSettings); } - static void validate(Request request, IndexMetaData leaderIndex, IndexMetaData followIndex, MapperService followerMapperService) { + static void validate(Request request, + IndexMetaData leaderIndex, + IndexMetaData followIndex, MapperService followerMapperService) { if (leaderIndex == null) { throw new IllegalArgumentException("leader index [" + request.leaderIndex + "] does not exist"); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index bc63ba5944e9f..b505ee015bab6 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -146,14 +146,25 @@ public boolean equals(final Object o) { public int hashCode() { return Objects.hash(fromSeqNo, maxOperationCount, shardId, maxOperationSizeInBytes); } + + @Override + public String toString() { + return "Request{" + + "fromSeqNo=" + fromSeqNo + + ", maxOperationCount=" + maxOperationCount + + ", shardId=" + shardId + + ", maxOperationSizeInBytes=" + maxOperationSizeInBytes + + '}'; + } + } public static final class Response extends ActionResponse { - private long indexMetadataVersion; + private long mappingVersion; - public long getIndexMetadataVersion() { - return indexMetadataVersion; + public long getMappingVersion() { + return mappingVersion; } private long globalCheckpoint; @@ -177,8 +188,8 @@ public Translog.Operation[] getOperations() { Response() { } - Response(final long indexMetadataVersion, final long globalCheckpoint, final long maxSeqNo, final Translog.Operation[] operations) { - this.indexMetadataVersion = indexMetadataVersion; + Response(final long mappingVersion, final long globalCheckpoint, final long maxSeqNo, final Translog.Operation[] operations) { + this.mappingVersion = mappingVersion; this.globalCheckpoint = globalCheckpoint; this.maxSeqNo = maxSeqNo; this.operations = operations; @@ -187,7 +198,7 @@ public Translog.Operation[] getOperations() { @Override public void readFrom(final StreamInput in) throws IOException { super.readFrom(in); - indexMetadataVersion = in.readVLong(); + mappingVersion = in.readVLong(); globalCheckpoint = in.readZLong(); maxSeqNo = in.readZLong(); operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new); @@ -196,7 +207,7 @@ public void readFrom(final StreamInput in) throws IOException { 
@Override public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); - out.writeVLong(indexMetadataVersion); + out.writeVLong(mappingVersion); out.writeZLong(globalCheckpoint); out.writeZLong(maxSeqNo); out.writeArray(Translog.Operation::writeOperation, operations); @@ -207,7 +218,7 @@ public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; final Response that = (Response) o; - return indexMetadataVersion == that.indexMetadataVersion && + return mappingVersion == that.mappingVersion && globalCheckpoint == that.globalCheckpoint && maxSeqNo == that.maxSeqNo && Arrays.equals(operations, that.operations); @@ -215,7 +226,7 @@ public boolean equals(final Object o) { @Override public int hashCode() { - return Objects.hash(indexMetadataVersion, globalCheckpoint, maxSeqNo, Arrays.hashCode(operations)); + return Objects.hash(mappingVersion, globalCheckpoint, maxSeqNo, Arrays.hashCode(operations)); } } @@ -241,7 +252,7 @@ protected Response shardOperation(Request request, ShardId shardId) throws IOExc IndexService indexService = indicesService.indexServiceSafe(request.getShard().getIndex()); IndexShard indexShard = indexService.getShard(request.getShard().id()); final SeqNoStats seqNoStats = indexShard.seqNoStats(); - final long indexMetaDataVersion = clusterService.state().metaData().index(shardId.getIndex()).getVersion(); + final long mappingVersion = clusterService.state().metaData().index(shardId.getIndex()).getMappingVersion(); final Translog.Operation[] operations = getOperations( indexShard, @@ -249,7 +260,7 @@ protected Response shardOperation(Request request, ShardId shardId) throws IOExc request.fromSeqNo, request.maxOperationCount, request.maxOperationSizeInBytes); - return new Response(indexMetaDataVersion, seqNoStats.getGlobalCheckpoint(), seqNoStats.getMaxSeqNo(), operations); + return new Response(mappingVersion, seqNoStats.getGlobalCheckpoint(), seqNoStats.getMaxSeqNo(), operations); } @Override @@ -293,7 +304,7 @@ static Translog.Operation[] getOperations(IndexShard indexShard, long globalChec // - 1 is needed, because toSeqNo is inclusive long toSeqNo = Math.min(globalCheckpoint, (fromSeqNo + maxOperationCount) - 1); final List operations = new ArrayList<>(); - try (Translog.Snapshot snapshot = indexShard.newLuceneChangesSnapshot("ccr", fromSeqNo, toSeqNo, true)) { + try (Translog.Snapshot snapshot = indexShard.newChangesSnapshot("ccr", fromSeqNo, toSeqNo, true)) { Translog.Operation op; while ((op = snapshot.next()) != null) { operations.add(op); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index cfc8e0fc4e766..00e3aaaae2a8e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -30,20 +30,25 @@ import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; import java.io.IOException; +import java.util.AbstractMap; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.NavigableMap; import java.util.Objects; import java.util.PriorityQueue; import java.util.Queue; +import java.util.TreeMap; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.LongConsumer; import java.util.function.LongSupplier; +import java.util.stream.Collectors; /** * The node task that fetch the write operations from a leader shard and @@ -62,6 +67,7 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { private static final Logger LOGGER = Loggers.getLogger(ShardFollowNodeTask.class); + private final String leaderIndex; private final ShardFollowTask params; private final TimeValue retryTimeout; private final TimeValue idleShardChangesRequestDelay; @@ -75,7 +81,7 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { private long followerMaxSeqNo = 0; private int numConcurrentReads = 0; private int numConcurrentWrites = 0; - private long currentIndexMetadataVersion = 0; + private long currentMappingVersion = 0; private long totalFetchTimeMillis = 0; private long numberOfSuccessfulFetches = 0; private long numberOfFailedFetches = 0; @@ -85,7 +91,9 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { private long numberOfSuccessfulBulkOperations = 0; private long numberOfFailedBulkOperations = 0; private long numberOfOperationsIndexed = 0; + private long lastFetchTime = -1; private final Queue buffer = new PriorityQueue<>(Comparator.comparing(Translog.Operation::seqNo)); + private final LinkedHashMap fetchExceptions; ShardFollowNodeTask(long id, String type, String action, String description, TaskId parentTask, Map headers, ShardFollowTask params, BiConsumer scheduler, final LongSupplier relativeTimeProvider) { @@ -95,6 +103,23 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { this.relativeTimeProvider = relativeTimeProvider; this.retryTimeout = params.getRetryTimeout(); this.idleShardChangesRequestDelay = params.getIdleShardRetryDelay(); + /* + * We keep track of the most recent fetch exceptions, with the number of exceptions that we track equal to the maximum number of + * concurrent fetches. For each failed fetch, we track the from sequence number associated with the request, and we clear the entry + * when the fetch task associated with that from sequence number succeeds. + */ + this.fetchExceptions = new LinkedHashMap() { + @Override + protected boolean removeEldestEntry(final Map.Entry eldest) { + return size() > params.getMaxConcurrentReadBatches(); + } + }; + + if (params.getLeaderClusterAlias() != null) { + leaderIndex = params.getLeaderClusterAlias() + ":" + params.getLeaderShardId().getIndexName(); + } else { + leaderIndex = params.getLeaderShardId().getIndexName(); + } } void start( @@ -114,14 +139,13 @@ void start( this.lastRequestedSeqNo = followerGlobalCheckpoint; } - // Forcefully updates follower mapping, this gets us the leader imd version and - // makes sure that leader and follower mapping are identical. 
- updateMapping(imdVersion -> { + // update the follower mapping; this gets us the leader mapping version and makes sure that the leader and follower mappings are identical + updateMapping(mappingVersion -> { synchronized (ShardFollowNodeTask.this) { - currentIndexMetadataVersion = imdVersion; + currentMappingVersion = mappingVersion; } - LOGGER.info("{} Started to follow leader shard {}, followGlobalCheckPoint={}, indexMetaDataVersion={}", - params.getFollowShardId(), params.getLeaderShardId(), followerGlobalCheckpoint, imdVersion); + LOGGER.info("{} Started to follow leader shard {}, followGlobalCheckPoint={}, mappingVersion={}", + params.getFollowShardId(), params.getLeaderShardId(), followerGlobalCheckpoint, mappingVersion); coordinateReads(); }); } @@ -219,11 +243,15 @@ private void sendShardChangesRequest(long from, int maxOperationCount, long maxR private void sendShardChangesRequest(long from, int maxOperationCount, long maxRequiredSeqNo, AtomicInteger retryCounter) { final long startTime = relativeTimeProvider.getAsLong(); + synchronized (this) { + lastFetchTime = startTime; + } innerSendShardChangesRequest(from, maxOperationCount, response -> { synchronized (ShardFollowNodeTask.this) { totalFetchTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); numberOfSuccessfulFetches++; + fetchExceptions.remove(from); operationsReceived += response.getOperations().length; totalTransferredBytes += Arrays.stream(response.getOperations()).mapToLong(Translog.Operation::estimateSize).sum(); } @@ -233,13 +261,14 @@ private void sendShardChangesRequest(long from, int maxOperationCount, long maxR synchronized (ShardFollowNodeTask.this) { totalFetchTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); numberOfFailedFetches++; + fetchExceptions.put(from, new ElasticsearchException(e)); } handleFailure(e, retryCounter, () -> sendShardChangesRequest(from, maxOperationCount, maxRequiredSeqNo, retryCounter)); }); } void handleReadResponse(long from, long maxRequiredSeqNo, ShardChangesAction.Response response) { - maybeUpdateMapping(response.getIndexMetadataVersion(), () -> innerHandleReadResponse(from, maxRequiredSeqNo, response)); + maybeUpdateMapping(response.getMappingVersion(), () -> innerHandleReadResponse(from, maxRequiredSeqNo, response)); } /** Called when some operations are fetched from the leader */ @@ -325,16 +354,16 @@ private synchronized void handleWriteResponse(final BulkShardOperationsResponse coordinateReads(); } - private synchronized void maybeUpdateMapping(Long minimumRequiredIndexMetadataVersion, Runnable task) { - if (currentIndexMetadataVersion >= minimumRequiredIndexMetadataVersion) { - LOGGER.trace("{} index metadata version [{}] is higher or equal than minimum required index metadata version [{}]", - params.getFollowShardId(), currentIndexMetadataVersion, minimumRequiredIndexMetadataVersion); + private synchronized void maybeUpdateMapping(Long minimumRequiredMappingVersion, Runnable task) { + if (currentMappingVersion >= minimumRequiredMappingVersion) { + LOGGER.trace("{} mapping version [{}] is higher than or equal to the minimum required mapping version [{}]", + params.getFollowShardId(), currentMappingVersion, minimumRequiredMappingVersion); task.run(); } else { - LOGGER.trace("{} updating mapping, index metadata version [{}] is lower than minimum required index metadata version [{}]", - params.getFollowShardId(), currentIndexMetadataVersion, minimumRequiredIndexMetadataVersion); - updateMapping(imdVersion -> { - 
currentIndexMetadataVersion = imdVersion; + LOGGER.trace("{} updating mapping, mapping version [{}] is lower than minimum required mapping version [{}]", + params.getFollowShardId(), currentMappingVersion, minimumRequiredMappingVersion); + updateMapping(mappingVersion -> { + currentMappingVersion = mappingVersion; task.run(); }); } @@ -393,7 +422,15 @@ public ShardId getFollowShardId() { @Override public synchronized Status getStatus() { + final long timeSinceLastFetchMillis; + if (lastFetchTime != -1) { + timeSinceLastFetchMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - lastFetchTime); + } else { + // To avoid confusion when ccr didn't yet execute a fetch: + timeSinceLastFetchMillis = -1; + } return new Status( + leaderIndex, getFollowShardId().getId(), leaderGlobalCheckpoint, leaderMaxSeqNo, @@ -403,7 +440,7 @@ public synchronized Status getStatus() { numConcurrentReads, numConcurrentWrites, buffer.size(), - currentIndexMetadataVersion, + currentMappingVersion, totalFetchTimeMillis, numberOfSuccessfulFetches, numberOfFailedFetches, @@ -412,13 +449,16 @@ public synchronized Status getStatus() { totalIndexTimeMillis, numberOfSuccessfulBulkOperations, numberOfFailedBulkOperations, - numberOfOperationsIndexed); + numberOfOperationsIndexed, + new TreeMap<>(fetchExceptions), + timeSinceLastFetchMillis); } public static class Status implements Task.Status { - public static final String NAME = "shard-follow-node-task-status"; + public static final String STATUS_PARSER_NAME = "shard-follow-node-task-status"; + static final ParseField LEADER_INDEX = new ParseField("leader_index"); static final ParseField SHARD_ID = new ParseField("shard_id"); static final ParseField LEADER_GLOBAL_CHECKPOINT_FIELD = new ParseField("leader_global_checkpoint"); static final ParseField LEADER_MAX_SEQ_NO_FIELD = new ParseField("leader_max_seq_no"); @@ -428,7 +468,7 @@ public static class Status implements Task.Status { static final ParseField NUMBER_OF_CONCURRENT_READS_FIELD = new ParseField("number_of_concurrent_reads"); static final ParseField NUMBER_OF_CONCURRENT_WRITES_FIELD = new ParseField("number_of_concurrent_writes"); static final ParseField NUMBER_OF_QUEUED_WRITES_FIELD = new ParseField("number_of_queued_writes"); - static final ParseField INDEX_METADATA_VERSION_FIELD = new ParseField("index_metadata_version"); + static final ParseField MAPPING_VERSION_FIELD = new ParseField("mapping_version"); static final ParseField TOTAL_FETCH_TIME_MILLIS_FIELD = new ParseField("total_fetch_time_millis"); static final ParseField NUMBER_OF_SUCCESSFUL_FETCHES_FIELD = new ParseField("number_of_successful_fetches"); static final ParseField NUMBER_OF_FAILED_FETCHES_FIELD = new ParseField("number_of_failed_fetches"); @@ -438,19 +478,22 @@ public static class Status implements Task.Status { static final ParseField NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD = new ParseField("number_of_successful_bulk_operations"); static final ParseField NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD = new ParseField("number_of_failed_bulk_operations"); static final ParseField NUMBER_OF_OPERATIONS_INDEXED_FIELD = new ParseField("number_of_operations_indexed"); + static final ParseField FETCH_EXCEPTIONS = new ParseField("fetch_exceptions"); + static final ParseField TIME_SINCE_LAST_FETCH_MILLIS_FIELD = new ParseField("time_since_last_fetch_millis"); - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + @SuppressWarnings("unchecked") + static final ConstructingObjectParser STATUS_PARSER = new 
ConstructingObjectParser<>(STATUS_PARSER_NAME, args -> new Status( - (int) args[0], - (long) args[1], + (String) args[0], + (int) args[1], (long) args[2], (long) args[3], (long) args[4], (long) args[5], - (int) args[6], + (long) args[6], (int) args[7], (int) args[8], - (long) args[9], + (int) args[9], (long) args[10], (long) args[11], (long) args[12], @@ -459,28 +502,61 @@ public static class Status implements Task.Status { (long) args[15], (long) args[16], (long) args[17], - (long) args[18])); + (long) args[18], + (long) args[19], + new TreeMap<>( + ((List>) args[20]) + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))), + (long) args[21])); + + public static final String FETCH_EXCEPTIONS_ENTRY_PARSER_NAME = "shard-follow-node-task-status-fetch-exceptions-entry"; + + static final ConstructingObjectParser, Void> FETCH_EXCEPTIONS_ENTRY_PARSER = + new ConstructingObjectParser<>( + FETCH_EXCEPTIONS_ENTRY_PARSER_NAME, + args -> new AbstractMap.SimpleEntry<>((long) args[0], (ElasticsearchException) args[1])); + + static { + STATUS_PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_INDEX); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), SHARD_ID); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_GLOBAL_CHECKPOINT_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_MAX_SEQ_NO_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_GLOBAL_CHECKPOINT_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_MAX_SEQ_NO_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LAST_REQUESTED_SEQ_NO_FIELD); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_READS_FIELD); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_WRITES_FIELD); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_QUEUED_WRITES_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), MAPPING_VERSION_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_FETCH_TIME_MILLIS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_FETCHES_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_FETCHES_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), OPERATIONS_RECEIVED_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_TRANSFERRED_BYTES); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_INDEX_TIME_MILLIS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_OPERATIONS_INDEXED_FIELD); + STATUS_PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_PARSER, FETCH_EXCEPTIONS); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIME_SINCE_LAST_FETCH_MILLIS_FIELD); + } + + static final ParseField FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO = new ParseField("from_seq_no"); + static final ParseField FETCH_EXCEPTIONS_ENTRY_EXCEPTION = new ParseField("exception"); static { - 
PARSER.declareInt(ConstructingObjectParser.constructorArg(), SHARD_ID); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_GLOBAL_CHECKPOINT_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_MAX_SEQ_NO_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_GLOBAL_CHECKPOINT_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_MAX_SEQ_NO_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), LAST_REQUESTED_SEQ_NO_FIELD); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_READS_FIELD); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_WRITES_FIELD); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_QUEUED_WRITES_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), INDEX_METADATA_VERSION_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_FETCH_TIME_MILLIS_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_FETCHES_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_FETCHES_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), OPERATIONS_RECEIVED_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_TRANSFERRED_BYTES); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_INDEX_TIME_MILLIS_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_OPERATIONS_INDEXED_FIELD); + FETCH_EXCEPTIONS_ENTRY_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO); + FETCH_EXCEPTIONS_ENTRY_PARSER.declareObject( + ConstructingObjectParser.constructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), + FETCH_EXCEPTIONS_ENTRY_EXCEPTION); + } + + private final String leaderIndex; + + public String leaderIndex() { + return leaderIndex; } private final int shardId; @@ -537,10 +613,10 @@ public int numberOfQueuedWrites() { return numberOfQueuedWrites; } - private final long indexMetadataVersion; + private final long mappingVersion; - public long indexMetadataVersion() { - return indexMetadataVersion; + public long mappingVersion() { + return mappingVersion; } private final long totalFetchTimeMillis; @@ -597,7 +673,20 @@ public long numberOfOperationsIndexed() { return numberOfOperationsIndexed; } + private final NavigableMap fetchExceptions; + + public NavigableMap fetchExceptions() { + return fetchExceptions; + } + + private final long timeSinceLastFetchMillis; + + public long timeSinceLastFetchMillis() { + return timeSinceLastFetchMillis; + } + Status( + final String leaderIndex, final int shardId, final long leaderGlobalCheckpoint, final long leaderMaxSeqNo, @@ -607,7 +696,7 @@ public long numberOfOperationsIndexed() { final int numberOfConcurrentReads, final int numberOfConcurrentWrites, final int numberOfQueuedWrites, - final long indexMetadataVersion, + final long mappingVersion, final long totalFetchTimeMillis, final long numberOfSuccessfulFetches, final long numberOfFailedFetches, @@ -616,7 +705,10 @@ public long numberOfOperationsIndexed() { final long totalIndexTimeMillis, final long numberOfSuccessfulBulkOperations, final long 
numberOfFailedBulkOperations, - final long numberOfOperationsIndexed) { + final long numberOfOperationsIndexed, + final NavigableMap fetchExceptions, + final long timeSinceLastFetchMillis) { + this.leaderIndex = leaderIndex; this.shardId = shardId; this.leaderGlobalCheckpoint = leaderGlobalCheckpoint; this.leaderMaxSeqNo = leaderMaxSeqNo; @@ -626,7 +718,7 @@ public long numberOfOperationsIndexed() { this.numberOfConcurrentReads = numberOfConcurrentReads; this.numberOfConcurrentWrites = numberOfConcurrentWrites; this.numberOfQueuedWrites = numberOfQueuedWrites; - this.indexMetadataVersion = indexMetadataVersion; + this.mappingVersion = mappingVersion; this.totalFetchTimeMillis = totalFetchTimeMillis; this.numberOfSuccessfulFetches = numberOfSuccessfulFetches; this.numberOfFailedFetches = numberOfFailedFetches; @@ -636,9 +728,12 @@ public long numberOfOperationsIndexed() { this.numberOfSuccessfulBulkOperations = numberOfSuccessfulBulkOperations; this.numberOfFailedBulkOperations = numberOfFailedBulkOperations; this.numberOfOperationsIndexed = numberOfOperationsIndexed; + this.fetchExceptions = Objects.requireNonNull(fetchExceptions); + this.timeSinceLastFetchMillis = timeSinceLastFetchMillis; } public Status(final StreamInput in) throws IOException { + this.leaderIndex = in.readString(); this.shardId = in.readVInt(); this.leaderGlobalCheckpoint = in.readZLong(); this.leaderMaxSeqNo = in.readZLong(); @@ -648,7 +743,7 @@ public Status(final StreamInput in) throws IOException { this.numberOfConcurrentReads = in.readVInt(); this.numberOfConcurrentWrites = in.readVInt(); this.numberOfQueuedWrites = in.readVInt(); - this.indexMetadataVersion = in.readVLong(); + this.mappingVersion = in.readVLong(); this.totalFetchTimeMillis = in.readVLong(); this.numberOfSuccessfulFetches = in.readVLong(); this.numberOfFailedFetches = in.readVLong(); @@ -658,15 +753,18 @@ public Status(final StreamInput in) throws IOException { this.numberOfSuccessfulBulkOperations = in.readVLong(); this.numberOfFailedBulkOperations = in.readVLong(); this.numberOfOperationsIndexed = in.readVLong(); + this.fetchExceptions = new TreeMap<>(in.readMap(StreamInput::readVLong, StreamInput::readException)); + this.timeSinceLastFetchMillis = in.readZLong(); } @Override public String getWriteableName() { - return NAME; + return STATUS_PARSER_NAME; } @Override public void writeTo(final StreamOutput out) throws IOException { + out.writeString(leaderIndex); out.writeVInt(shardId); out.writeZLong(leaderGlobalCheckpoint); out.writeZLong(leaderMaxSeqNo); @@ -676,7 +774,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeVInt(numberOfConcurrentReads); out.writeVInt(numberOfConcurrentWrites); out.writeVInt(numberOfQueuedWrites); - out.writeVLong(indexMetadataVersion); + out.writeVLong(mappingVersion); out.writeVLong(totalFetchTimeMillis); out.writeVLong(numberOfSuccessfulFetches); out.writeVLong(numberOfFailedFetches); @@ -686,12 +784,15 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeVLong(numberOfSuccessfulBulkOperations); out.writeVLong(numberOfFailedBulkOperations); out.writeVLong(numberOfOperationsIndexed); + out.writeMap(fetchExceptions, StreamOutput::writeVLong, StreamOutput::writeException); + out.writeZLong(timeSinceLastFetchMillis); } @Override public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startObject(); { + builder.field(LEADER_INDEX.getPreferredName(), leaderIndex); builder.field(SHARD_ID.getPreferredName(), 
shardId); builder.field(LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), leaderGlobalCheckpoint); builder.field(LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), leaderMaxSeqNo); @@ -701,7 +802,7 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa builder.field(NUMBER_OF_CONCURRENT_READS_FIELD.getPreferredName(), numberOfConcurrentReads); builder.field(NUMBER_OF_CONCURRENT_WRITES_FIELD.getPreferredName(), numberOfConcurrentWrites); builder.field(NUMBER_OF_QUEUED_WRITES_FIELD.getPreferredName(), numberOfQueuedWrites); - builder.field(INDEX_METADATA_VERSION_FIELD.getPreferredName(), indexMetadataVersion); + builder.field(MAPPING_VERSION_FIELD.getPreferredName(), mappingVersion); builder.humanReadableField( TOTAL_FETCH_TIME_MILLIS_FIELD.getPreferredName(), "total_fetch_time", @@ -720,13 +821,34 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa builder.field(NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfSuccessfulBulkOperations); builder.field(NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfFailedBulkOperations); builder.field(NUMBER_OF_OPERATIONS_INDEXED_FIELD.getPreferredName(), numberOfOperationsIndexed); + builder.startArray(FETCH_EXCEPTIONS.getPreferredName()); + { + for (final Map.Entry entry : fetchExceptions.entrySet()) { + builder.startObject(); + { + builder.field(FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), entry.getKey()); + builder.field(FETCH_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); + builder.startObject(); + { + ElasticsearchException.generateThrowableXContent(builder, params, entry.getValue()); + } + builder.endObject(); + } + builder.endObject(); + } + } + builder.endArray(); + builder.humanReadableField( + TIME_SINCE_LAST_FETCH_MILLIS_FIELD.getPreferredName(), + "time_since_last_fetch", + new TimeValue(timeSinceLastFetchMillis, TimeUnit.MILLISECONDS)); } builder.endObject(); return builder; } public static Status fromXContent(final XContentParser parser) { - return PARSER.apply(parser, null); + return STATUS_PARSER.apply(parser, null); } @Override @@ -734,7 +856,8 @@ public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; final Status that = (Status) o; - return shardId == that.shardId && + return leaderIndex.equals(that.leaderIndex) && + shardId == that.shardId && leaderGlobalCheckpoint == that.leaderGlobalCheckpoint && leaderMaxSeqNo == that.leaderMaxSeqNo && followerGlobalCheckpoint == that.followerGlobalCheckpoint && @@ -743,19 +866,29 @@ public boolean equals(final Object o) { numberOfConcurrentReads == that.numberOfConcurrentReads && numberOfConcurrentWrites == that.numberOfConcurrentWrites && numberOfQueuedWrites == that.numberOfQueuedWrites && - indexMetadataVersion == that.indexMetadataVersion && + mappingVersion == that.mappingVersion && totalFetchTimeMillis == that.totalFetchTimeMillis && numberOfSuccessfulFetches == that.numberOfSuccessfulFetches && numberOfFailedFetches == that.numberOfFailedFetches && operationsReceived == that.operationsReceived && totalTransferredBytes == that.totalTransferredBytes && numberOfSuccessfulBulkOperations == that.numberOfSuccessfulBulkOperations && - numberOfFailedBulkOperations == that.numberOfFailedBulkOperations; + numberOfFailedBulkOperations == that.numberOfFailedBulkOperations && + numberOfOperationsIndexed == that.numberOfOperationsIndexed && + /* + * ElasticsearchException does not implement equals so we will assume the fetch 
exceptions are equal if they are equal + * up to the key set and their messages. Note that we are relying on the fact that the fetch exceptions are ordered by + * keys. + */ + fetchExceptions.keySet().equals(that.fetchExceptions.keySet()) && + getFetchExceptionMessages(this).equals(getFetchExceptionMessages(that)) && + timeSinceLastFetchMillis == that.timeSinceLastFetchMillis; } @Override public int hashCode() { return Objects.hash( + leaderIndex, shardId, leaderGlobalCheckpoint, leaderMaxSeqNo, @@ -765,15 +898,26 @@ public int hashCode() { numberOfConcurrentReads, numberOfConcurrentWrites, numberOfQueuedWrites, - indexMetadataVersion, + mappingVersion, totalFetchTimeMillis, numberOfSuccessfulFetches, numberOfFailedFetches, operationsReceived, totalTransferredBytes, numberOfSuccessfulBulkOperations, - numberOfFailedBulkOperations); - + numberOfFailedBulkOperations, + numberOfOperationsIndexed, + /* + * ElasticsearchException does not implement hash code so we will compute the hash code based on the key set and the + * messages. Note that we are relying on the fact that the fetch exceptions are ordered by keys. + */ + fetchExceptions.keySet(), + getFetchExceptionMessages(this), + timeSinceLastFetchMillis); + } + + private static List getFetchExceptionMessages(final Status status) { + return status.fetchExceptions().values().stream().map(ElasticsearchException::getMessage).collect(Collectors.toList()); } public String toString() { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 34ba3a2e5c691..83e3e4806e184 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -115,7 +115,7 @@ protected void innerUpdateMapping(LongConsumer handler, Consumer erro putMappingRequest.type(mappingMetaData.type()); putMappingRequest.source(mappingMetaData.source().string(), XContentType.JSON); followerClient.admin().indices().putMapping(putMappingRequest, ActionListener.wrap( - putMappingResponse -> handler.accept(indexMetaData.getVersion()), + putMappingResponse -> handler.accept(indexMetaData.getMappingVersion()), errorHandler)); }, errorHandler)); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java index 8949739e76d93..33873201f5fb3 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java @@ -17,14 +17,17 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.Ccr; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.List; +import java.util.Objects; import java.util.Set; import java.util.function.Consumer; @@ -34,6 +37,7 @@ public class TransportCcrStatsAction extends TransportTasksAction< CcrStatsAction.TasksResponse, 
CcrStatsAction.TaskResponse> { private final IndexNameExpressionResolver resolver; + private final CcrLicenseChecker ccrLicenseChecker; @Inject public TransportCcrStatsAction( @@ -41,7 +45,8 @@ public TransportCcrStatsAction( final ClusterService clusterService, final TransportService transportService, final ActionFilters actionFilters, - final IndexNameExpressionResolver resolver) { + final IndexNameExpressionResolver resolver, + final CcrLicenseChecker ccrLicenseChecker) { super( settings, CcrStatsAction.NAME, @@ -51,7 +56,20 @@ public TransportCcrStatsAction( CcrStatsAction.TasksRequest::new, CcrStatsAction.TasksResponse::new, Ccr.CCR_THREAD_POOL_NAME); - this.resolver = resolver; + this.resolver = Objects.requireNonNull(resolver); + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + } + + @Override + protected void doExecute( + final Task task, + final CcrStatsAction.TasksRequest request, + final ActionListener listener) { + if (ccrLicenseChecker.isCcrAllowed()) { + super.doExecute(task, request, listener); + } else { + listener.onFailure(LicenseUtils.newComplianceException("ccr")); + } } @Override diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java index f671d59cfe469..93b2bcc3e4096 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java @@ -28,7 +28,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReferenceArray; -public class UnfollowIndexAction extends Action { +public class UnfollowIndexAction extends Action { public static final UnfollowIndexAction INSTANCE = new UnfollowIndexAction(); public static final String NAME = "cluster:admin/xpack/ccr/unfollow_index"; @@ -38,8 +38,8 @@ private UnfollowIndexAction() { } @Override - public Response newResponse() { - return new Response(); + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); } public static class Request extends ActionRequest { @@ -72,31 +72,27 @@ public void writeTo(StreamOutput out) throws IOException { } } - public static class Response extends AcknowledgedResponse { - - Response(boolean acknowledged) { - super(acknowledged); - } - - Response() { - } - } - - public static class TransportAction extends HandledTransportAction { + public static class TransportAction extends HandledTransportAction { private final Client client; private final PersistentTasksService persistentTasksService; @Inject - public TransportAction(Settings settings, TransportService transportService, - ActionFilters actionFilters, Client client, PersistentTasksService persistentTasksService) { + public TransportAction(Settings settings, + TransportService transportService, + ActionFilters actionFilters, + Client client, + PersistentTasksService persistentTasksService) { super(settings, NAME, transportService, actionFilters, Request::new); this.client = client; this.persistentTasksService = persistentTasksService; } @Override - protected void doExecute(Task task, Request request, ActionListener listener) { + protected void doExecute(Task task, + Request request, + ActionListener listener) { + client.admin().cluster().state(new ClusterStateRequest(), ActionListener.wrap(r -> { IndexMetaData followIndexMetadata = r.getState().getMetaData().index(request.followIndex); if 
(followIndexMetadata == null) { @@ -140,7 +136,7 @@ void finalizeResponse() { if (error == null) { // include task ids? - listener.onResponse(new Response(true)); + listener.onResponse(new AcknowledgedResponse(true)); } else { // TODO: cancel all started tasks listener.onFailure(error); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java new file mode 100644 index 0000000000000..675758903bf27 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.xpack.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask; + +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.CountDownLatch; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class CcrLicenseIT extends ESSingleNodeTestCase { + + @Override + protected Collection> getPlugins() { + return Collections.singletonList(IncompatibleLicenseLocalStateCcr.class); + } + + public void testThatFollowingIndexIsUnavailableWithIncompatibleLicense() throws InterruptedException { + final FollowIndexAction.Request followRequest = getFollowRequest(); + final CountDownLatch latch = new CountDownLatch(1); + client().execute( + FollowIndexAction.INSTANCE, + followRequest, + new ActionListener() { + @Override + public void onResponse(final AcknowledgedResponse response) { + fail(); + } + + @Override + public void onFailure(final Exception e) { + assertIncompatibleLicense(e); + latch.countDown(); + } + }); + latch.await(); + } + + public void testThatCreateAndFollowingIndexIsUnavailableWithIncompatibleLicense() throws InterruptedException { + final FollowIndexAction.Request followRequest = getFollowRequest(); + final CreateAndFollowIndexAction.Request createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest); + final CountDownLatch latch = new CountDownLatch(1); + client().execute( + CreateAndFollowIndexAction.INSTANCE, + createAndFollowRequest, + new ActionListener() { + @Override + public void onResponse(final CreateAndFollowIndexAction.Response response) { + fail(); + } + + @Override + public void onFailure(final Exception e) { + assertIncompatibleLicense(e); + latch.countDown(); + } + }); + latch.await(); + } + + public void testThatCcrStatsAreUnavailableWithIncompatibleLicense() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + client().execute(CcrStatsAction.INSTANCE, new CcrStatsAction.TasksRequest(), new ActionListener() { + @Override + public void onResponse(final CcrStatsAction.TasksResponse tasksResponse) { + fail(); + } + + @Override + 
public void onFailure(final Exception e) { + assertIncompatibleLicense(e); + latch.countDown(); + } + }); + + latch.await(); + } + + private void assertIncompatibleLicense(final Exception e) { + assertThat(e, instanceOf(ElasticsearchSecurityException.class)); + assertThat(e.getMessage(), equalTo("current license is non-compliant for [ccr]")); + } + + private FollowIndexAction.Request getFollowRequest() { + return new FollowIndexAction.Request( + "leader", + "follower", + ShardFollowNodeTask.DEFAULT_MAX_BATCH_OPERATION_COUNT, + ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_READ_BATCHES, + ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, + ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, + ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, + TimeValue.timeValueMillis(10), + TimeValue.timeValueMillis(10)); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrTests.java index c9a862954566a..0a9ca00590b3d 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrTests.java @@ -3,6 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.ccr; import org.elasticsearch.Version; @@ -40,9 +41,8 @@ public void testGetEngineFactory() throws IOException { .numberOfShards(1) .numberOfReplicas(0) .build(); - final Ccr ccr = new Ccr(Settings.EMPTY); - final Optional engineFactory = - ccr.getEngineFactory(new IndexSettings(indexMetaData, Settings.EMPTY)); + final Ccr ccr = new Ccr(Settings.EMPTY, new CcrLicenseChecker(() -> true)); + final Optional engineFactory = ccr.getEngineFactory(new IndexSettings(indexMetaData, Settings.EMPTY)); if (value != null && value) { assertTrue(engineFactory.isPresent()); assertThat(engineFactory.get(), instanceOf(FollowingEngineFactory.class)); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IncompatibleLicenseLocalStateCcr.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IncompatibleLicenseLocalStateCcr.java new file mode 100644 index 0000000000000..c4b765d3c65ea --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IncompatibleLicenseLocalStateCcr.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; + +import java.nio.file.Path; + +public class IncompatibleLicenseLocalStateCcr extends LocalStateCompositeXPackPlugin { + + public IncompatibleLicenseLocalStateCcr(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + + plugins.add(new Ccr(settings, new CcrLicenseChecker(() -> false)) { + + @Override + protected XPackLicenseState getLicenseState() { + return IncompatibleLicenseLocalStateCcr.this.getLicenseState(); + } + + }); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalStateCcr.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalStateCcr.java index b705ce53e7e1a..cfc30b8dfac47 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalStateCcr.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalStateCcr.java @@ -3,6 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.ccr; import org.elasticsearch.common.settings.Settings; @@ -15,14 +16,16 @@ public class LocalStateCcr extends LocalStateCompositeXPackPlugin { public LocalStateCcr(final Settings settings, final Path configPath) throws Exception { super(settings, configPath); - LocalStateCcr thisVar = this; - plugins.add(new Ccr(settings){ + plugins.add(new Ccr(settings, new CcrLicenseChecker(() -> true)) { + @Override protected XPackLicenseState getLicenseState() { - return thisVar.getLicenseState(); + return LocalStateCcr.this.getLicenseState(); } + }); } + } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java index ea258ca6b6818..708287c16f658 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java @@ -47,7 +47,6 @@ import org.elasticsearch.xpack.core.XPackSettings; import java.io.IOException; -import java.io.UncheckedIOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -338,26 +337,12 @@ public void testFollowIndexAndCloseNode() throws Exception { ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); client().execute(FollowIndexAction.INSTANCE, followRequest).get(); - long maxNumDocsReplicated = Math.min(3000, randomLongBetween(followRequest.getMaxBatchOperationCount(), + long maxNumDocsReplicated = Math.min(1000, randomLongBetween(followRequest.getMaxBatchOperationCount(), followRequest.getMaxBatchOperationCount() * 10)); long minNumDocsReplicated = maxNumDocsReplicated / 3L; logger.info("waiting for at least [{}] documents to be indexed and then stop a random data node", minNumDocsReplicated); - awaitBusy(() -> { - SearchRequest request = new SearchRequest("index2"); - request.source(new SearchSourceBuilder().size(0)); - SearchResponse response = client().search(request).actionGet(); - if (response.getHits().getTotalHits() >= minNumDocsReplicated) { - try { - internalCluster().stopRandomNonMasterNode(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - return 
true; - } else { - return false; - } - }, 30, TimeUnit.SECONDS); - + atLeastDocsIndexed("index2", minNumDocsReplicated); + internalCluster().stopRandomNonMasterNode(); logger.info("waiting for at least [{}] documents to be indexed", maxNumDocsReplicated); atLeastDocsIndexed("index2", maxNumDocsReplicated); run.set(false); @@ -548,7 +533,7 @@ private void unfollowIndex(String index) throws Exception { } } assertThat(numNodeTasks, equalTo(0)); - }); + }, 30, TimeUnit.SECONDS); } private CheckedRunnable assertExpectedDocumentRunnable(final int value) { @@ -660,7 +645,7 @@ private void atLeastDocsIndexed(String index, long numDocsReplicated) throws Int request.source(new SearchSourceBuilder().size(0)); SearchResponse response = client().search(request).actionGet(); return response.getHits().getTotalHits() >= numDocsReplicated; - }, 30, TimeUnit.SECONDS); + }, 60, TimeUnit.SECONDS); } private void assertSameDocCount(String index1, String index2) throws Exception { @@ -674,7 +659,7 @@ private void assertSameDocCount(String index1, String index2) throws Exception { request2.source(new SearchSourceBuilder().size(0)); SearchResponse response2 = client().search(request2).actionGet(); assertThat(response2.getHits().getTotalHits(), equalTo(response1.getHits().getTotalHits())); - }); + }, 60, TimeUnit.SECONDS); } public static FollowIndexAction.Request createFollowRequest(String leaderIndex, String followIndex) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexActionTests.java index 3088cf740a110..5b52700f5579b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexActionTests.java @@ -70,7 +70,7 @@ public void testValidation() throws IOException { IndexMetaData followIMD = createIMD("index2", State.OPEN, "{\"properties\": {\"field\": {\"type\": \"text\"}}}", 5, Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build()); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2"); - mapperService.updateMapping(followIMD); + mapperService.updateMapping(null, followIMD); Exception e = expectThrows(IllegalArgumentException.class, () -> FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService)); assertThat(e.getMessage(), equalTo("mapper [field] of different type, current_type [text], merged_type [keyword]")); @@ -99,7 +99,7 @@ public void testValidation() throws IOException { IndexMetaData followIMD = createIMD("index2", 5, followingIndexSettings); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), followingIndexSettings, "index2"); - mapperService.updateMapping(followIMD); + mapperService.updateMapping(null, followIMD); IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService)); assertThat(error.getMessage(), equalTo("the following index [index2] is not ready to follow; " + @@ -112,7 +112,7 @@ public void testValidation() throws IOException { IndexMetaData followIMD = createIMD("index2", 5, Settings.builder() .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build()); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), 
Settings.EMPTY, "index2"); - mapperService.updateMapping(followIMD); + mapperService.updateMapping(null, followIMD); FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); } { @@ -128,7 +128,7 @@ public void testValidation() throws IOException { .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), followIMD.getSettings(), "index2"); - mapperService.updateMapping(followIMD); + mapperService.updateMapping(null, followIMD); FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); } { @@ -146,7 +146,7 @@ public void testValidation() throws IOException { .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), followIMD.getSettings(), "index2"); - mapperService.updateMapping(followIMD); + mapperService.updateMapping(null, followIMD); FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java index ac6d8f786fbe2..430e9cb48b1a8 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java @@ -46,7 +46,7 @@ public void testGetOperations() throws Exception { .build(); final IndexService indexService = createIndex("index", settings); - final int numWrites = randomIntBetween(2, 8192); + final int numWrites = randomIntBetween(2, 4096); for (int i = 0; i < numWrites; i++) { client().prepareIndex("index", "doc", Integer.toString(i)).setSource("{}", XContentType.JSON).get(); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java index 8e150b8f934e4..e9c67097d72b2 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java @@ -12,7 +12,7 @@ public class ShardChangesResponseTests extends AbstractStreamableTestCase f.exception != null).count(); + assertThat(status.numberOfFailedFetches(), equalTo(numberOfFailedFetches)); + // the failures were able to be retried so fetch failures should have cleared + assertThat(status.fetchExceptions().entrySet(), hasSize(0)); + assertThat(status.mappingVersion(), equalTo(testRun.finalMappingVersion)); }); task.markAsCompleted(); @@ -82,12 +88,12 @@ private ShardFollowNodeTask createShardFollowTask(int concurrency, TestRun testR return new ShardFollowNodeTask( 1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), params, scheduler, System::nanoTime) { - private volatile long indexMetadataVersion = 0L; + private volatile long mappingVersion = 0L; private final Map fromToSlot = new HashMap<>(); @Override protected void innerUpdateMapping(LongConsumer handler, Consumer errorHandler) { - handler.accept(indexMetadataVersion); + handler.accept(mappingVersion); } @Override @@ -128,7 +134,7 @@ protected void innerSendShardChangesRequest(long from, int maxOperationCount, Co fromToSlot.put(from, ++slot); // if 
too many invocations occur with the same from then AOBE occurs, this ok and then something is wrong. } - indexMetadataVersion = testResponse.indexMetadataVersion; + mappingVersion = testResponse.mappingVersion; if (testResponse.exception != null) { errorHandler.accept(testResponse.exception); } else { @@ -181,15 +187,15 @@ private void tearDown() { }; } - private static TestRun createTestRun(long startSeqNo, long startIndexMetadataVersion, int maxOperationCount) { + private static TestRun createTestRun(long startSeqNo, long startMappingVersion, int maxOperationCount) { long prevGlobalCheckpoint = startSeqNo; - long indexMetaDataVersion = startIndexMetadataVersion; + long mappingVersion = startMappingVersion; int numResponses = randomIntBetween(16, 256); Map> responses = new HashMap<>(numResponses); for (int i = 0; i < numResponses; i++) { long nextGlobalCheckPoint = prevGlobalCheckpoint + maxOperationCount; if (sometimes()) { - indexMetaDataVersion++; + mappingVersion++; } if (sometimes()) { @@ -197,7 +203,7 @@ private static TestRun createTestRun(long startSeqNo, long startIndexMetadataVer // Sometimes add a random retryable error if (sometimes()) { Exception error = new UnavailableShardsException(new ShardId("test", "test", 0), ""); - item.add(new TestResponse(error, indexMetaDataVersion, null)); + item.add(new TestResponse(error, mappingVersion, null)); } List ops = new ArrayList<>(); for (long seqNo = prevGlobalCheckpoint; seqNo <= nextGlobalCheckPoint; seqNo++) { @@ -205,8 +211,8 @@ private static TestRun createTestRun(long startSeqNo, long startIndexMetadataVer byte[] source = "{}".getBytes(StandardCharsets.UTF_8); ops.add(new Translog.Index("doc", id, seqNo, 0, source)); } - item.add(new TestResponse(null, indexMetaDataVersion, - new ShardChangesAction.Response(indexMetaDataVersion, nextGlobalCheckPoint, nextGlobalCheckPoint, ops.toArray(EMPTY)))); + item.add(new TestResponse(null, mappingVersion, + new ShardChangesAction.Response(mappingVersion, nextGlobalCheckPoint, nextGlobalCheckPoint, ops.toArray(EMPTY)))); responses.put(prevGlobalCheckpoint, item); } else { // Simulates a leader shard copy not having all the operations the shard follow task thinks it has by @@ -218,13 +224,13 @@ private static TestRun createTestRun(long startSeqNo, long startIndexMetadataVer // Sometimes add a random retryable error if (sometimes()) { Exception error = new UnavailableShardsException(new ShardId("test", "test", 0), ""); - item.add(new TestResponse(error, indexMetaDataVersion, null)); + item.add(new TestResponse(error, mappingVersion, null)); } // Sometimes add an empty shard changes response to also simulate a leader shard lagging behind if (sometimes()) { ShardChangesAction.Response response = - new ShardChangesAction.Response(indexMetaDataVersion, prevGlobalCheckpoint, prevGlobalCheckpoint, EMPTY); - item.add(new TestResponse(null, indexMetaDataVersion, response)); + new ShardChangesAction.Response(mappingVersion, prevGlobalCheckpoint, prevGlobalCheckpoint, EMPTY); + item.add(new TestResponse(null, mappingVersion, response)); } List ops = new ArrayList<>(); for (long seqNo = fromSeqNo; seqNo <= toSeqNo; seqNo++) { @@ -235,14 +241,14 @@ private static TestRun createTestRun(long startSeqNo, long startIndexMetadataVer // Report toSeqNo to simulate maxBatchSizeInBytes limit being met or last op to simulate a shard lagging behind: long localLeaderGCP = randomBoolean() ? 
ops.get(ops.size() - 1).seqNo() : toSeqNo; ShardChangesAction.Response response = - new ShardChangesAction.Response(indexMetaDataVersion, localLeaderGCP, localLeaderGCP, ops.toArray(EMPTY)); - item.add(new TestResponse(null, indexMetaDataVersion, response)); + new ShardChangesAction.Response(mappingVersion, localLeaderGCP, localLeaderGCP, ops.toArray(EMPTY)); + item.add(new TestResponse(null, mappingVersion, response)); responses.put(fromSeqNo, Collections.unmodifiableList(item)); } } prevGlobalCheckpoint = nextGlobalCheckPoint + 1; } - return new TestRun(maxOperationCount, startSeqNo, startIndexMetadataVersion, indexMetaDataVersion, + return new TestRun(maxOperationCount, startSeqNo, startMappingVersion, mappingVersion, prevGlobalCheckpoint - 1, responses); } @@ -255,18 +261,18 @@ private static class TestRun { final int maxOperationCount; final long startSeqNo; - final long startIndexMetadataVersion; + final long startMappingVersion; - final long finalIndexMetaDataVerion; + final long finalMappingVersion; final long finalExpectedGlobalCheckpoint; final Map> responses; - private TestRun(int maxOperationCount, long startSeqNo, long startIndexMetadataVersion, long finalIndexMetaDataVerion, + private TestRun(int maxOperationCount, long startSeqNo, long startMappingVersion, long finalMappingVersion, long finalExpectedGlobalCheckpoint, Map> responses) { this.maxOperationCount = maxOperationCount; this.startSeqNo = startSeqNo; - this.startIndexMetadataVersion = startIndexMetadataVersion; - this.finalIndexMetaDataVerion = finalIndexMetaDataVerion; + this.startMappingVersion = startMappingVersion; + this.finalMappingVersion = finalMappingVersion; this.finalExpectedGlobalCheckpoint = finalExpectedGlobalCheckpoint; this.responses = Collections.unmodifiableMap(responses); } @@ -275,12 +281,12 @@ private TestRun(int maxOperationCount, long startSeqNo, long startIndexMetadataV private static class TestResponse { final Exception exception; - final long indexMetadataVersion; + final long mappingVersion; final ShardChangesAction.Response response; - private TestResponse(Exception exception, long indexMetadataVersion, ShardChangesAction.Response response) { + private TestResponse(Exception exception, long mappingVersion, ShardChangesAction.Response response) { this.exception = exception; - this.indexMetadataVersion = indexMetadataVersion; + this.mappingVersion = mappingVersion; this.response = response; } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java index 6138ba96d5439..8368a818e006e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java @@ -6,11 +6,20 @@ package org.elasticsearch.xpack.ccr.action; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; +import java.util.Map; +import java.util.NavigableMap; +import java.util.TreeMap; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; public class ShardFollowNodeTaskStatusTests extends 
AbstractSerializingTestCase { @@ -21,7 +30,9 @@ protected ShardFollowNodeTask.Status doParseInstance(XContentParser parser) thro @Override protected ShardFollowNodeTask.Status createTestInstance() { + // if you change this constructor, reflect the changes in the hand-written assertions below return new ShardFollowNodeTask.Status( + randomAlphaOfLength(4), randomInt(), randomNonNegativeLong(), randomNonNegativeLong(), @@ -40,7 +51,60 @@ protected ShardFollowNodeTask.Status createTestInstance() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), - randomNonNegativeLong()); + randomNonNegativeLong(), + randomReadExceptions(), + randomLong()); + } + + @Override + protected void assertEqualInstances(final ShardFollowNodeTask.Status expectedInstance, final ShardFollowNodeTask.Status newInstance) { + assertNotSame(expectedInstance, newInstance); + assertThat(newInstance.leaderIndex(), equalTo(expectedInstance.leaderIndex())); + assertThat(newInstance.getShardId(), equalTo(expectedInstance.getShardId())); + assertThat(newInstance.leaderGlobalCheckpoint(), equalTo(expectedInstance.leaderGlobalCheckpoint())); + assertThat(newInstance.leaderMaxSeqNo(), equalTo(expectedInstance.leaderMaxSeqNo())); + assertThat(newInstance.followerGlobalCheckpoint(), equalTo(expectedInstance.followerGlobalCheckpoint())); + assertThat(newInstance.lastRequestedSeqNo(), equalTo(expectedInstance.lastRequestedSeqNo())); + assertThat(newInstance.numberOfConcurrentReads(), equalTo(expectedInstance.numberOfConcurrentReads())); + assertThat(newInstance.numberOfConcurrentWrites(), equalTo(expectedInstance.numberOfConcurrentWrites())); + assertThat(newInstance.numberOfQueuedWrites(), equalTo(expectedInstance.numberOfQueuedWrites())); + assertThat(newInstance.mappingVersion(), equalTo(expectedInstance.mappingVersion())); + assertThat(newInstance.totalFetchTimeMillis(), equalTo(expectedInstance.totalFetchTimeMillis())); + assertThat(newInstance.numberOfSuccessfulFetches(), equalTo(expectedInstance.numberOfSuccessfulFetches())); + assertThat(newInstance.numberOfFailedFetches(), equalTo(expectedInstance.numberOfFailedFetches())); + assertThat(newInstance.operationsReceived(), equalTo(expectedInstance.operationsReceived())); + assertThat(newInstance.totalTransferredBytes(), equalTo(expectedInstance.totalTransferredBytes())); + assertThat(newInstance.totalIndexTimeMillis(), equalTo(expectedInstance.totalIndexTimeMillis())); + assertThat(newInstance.numberOfSuccessfulBulkOperations(), equalTo(expectedInstance.numberOfSuccessfulBulkOperations())); + assertThat(newInstance.numberOfFailedBulkOperations(), equalTo(expectedInstance.numberOfFailedBulkOperations())); + assertThat(newInstance.numberOfOperationsIndexed(), equalTo(expectedInstance.numberOfOperationsIndexed())); + assertThat(newInstance.fetchExceptions().size(), equalTo(expectedInstance.fetchExceptions().size())); + assertThat(newInstance.fetchExceptions().keySet(), equalTo(expectedInstance.fetchExceptions().keySet())); + for (final Map.Entry entry : newInstance.fetchExceptions().entrySet()) { + // x-content loses the exception + final ElasticsearchException expected = expectedInstance.fetchExceptions().get(entry.getKey()); + assertThat(entry.getValue().getMessage(), containsString(expected.getMessage())); + assertNotNull(entry.getValue().getCause()); + assertThat( + entry.getValue().getCause(), + anyOf(instanceOf(ElasticsearchException.class), instanceOf(IllegalStateException.class))); + assertThat(entry.getValue().getCause().getMessage(), 
containsString(expected.getCause().getMessage())); + } + assertThat(newInstance.timeSinceLastFetchMillis(), equalTo(expectedInstance.timeSinceLastFetchMillis())); + } + + @Override + protected boolean assertToXContentEquivalence() { + return false; + } + + private NavigableMap randomReadExceptions() { + final int count = randomIntBetween(0, 16); + final NavigableMap readExceptions = new TreeMap<>(); + for (int i = 0; i < count; i++) { + readExceptions.put(randomNonNegativeLong(), new ElasticsearchException(new IllegalStateException("index [" + i + "]"))); + } + return readExceptions; } @Override diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java index 9eda637dc9df2..4f7c0bf16645c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ccr.action; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.shard.ShardId; @@ -20,8 +21,10 @@ import java.util.Collections; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Queue; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.LongConsumer; @@ -29,6 +32,8 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.sameInstance; @@ -39,11 +44,17 @@ public class ShardFollowNodeTaskTests extends ESTestCase { private List> bulkShardOperationRequests; private BiConsumer scheduler = (delay, task) -> task.run(); + private Consumer beforeSendShardChangesRequest = status -> {}; + + private AtomicBoolean simulateResponse = new AtomicBoolean(); + private Queue readFailures; private Queue writeFailures; private Queue mappingUpdateFailures; - private Queue imdVersions; + private Queue mappingVersions; + private Queue leaderGlobalCheckpoints; private Queue followerGlobalCheckpoints; + private Queue maxSeqNos; public void testCoordinateReads() { ShardFollowNodeTask task = createShardFollowTask(8, between(8, 20), between(1, 20), Integer.MAX_VALUE, Long.MAX_VALUE); @@ -169,6 +180,27 @@ public void testReceiveRetryableError() { for (int i = 0; i < max; i++) { readFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); } + mappingVersions.add(1L); + leaderGlobalCheckpoints.add(63L); + maxSeqNos.add(63L); + simulateResponse.set(true); + final AtomicLong retryCounter = new AtomicLong(); + // before each retry, we assert the fetch failures; after the last retry, the fetch failure should clear + beforeSendShardChangesRequest = status -> { + assertThat(status.numberOfFailedFetches(), equalTo(retryCounter.get())); + if (retryCounter.get() > 0) { + assertThat(status.fetchExceptions().entrySet(), hasSize(1)); + final Map.Entry entry = status.fetchExceptions().entrySet().iterator().next(); + assertThat(entry.getKey(), equalTo(0L)); + 
assertThat(entry.getValue(), instanceOf(ElasticsearchException.class)); + assertNotNull(entry.getValue().getCause()); + assertThat(entry.getValue().getCause(), instanceOf(ShardNotFoundException.class)); + final ShardNotFoundException cause = (ShardNotFoundException) entry.getValue().getCause(); + assertThat(cause.getShardId().getIndexName(), equalTo("leader_index")); + assertThat(cause.getShardId().getId(), equalTo(0)); + } + retryCounter.incrementAndGet(); + }; task.coordinateReads(); // NUmber of requests is equal to initial request + retried attempts @@ -178,10 +210,14 @@ public void testReceiveRetryableError() { assertThat(shardChangesRequest[1], equalTo(64L)); } - assertThat(task.isStopped(), equalTo(false)); + assertFalse("task is not stopped", task.isStopped()); ShardFollowNodeTask.Status status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.numberOfFailedFetches(), equalTo((long)max)); + assertThat(status.numberOfSuccessfulFetches(), equalTo(1L)); + // the fetch failure has cleared + assertThat(status.fetchExceptions().entrySet(), hasSize(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } @@ -194,6 +230,23 @@ public void testReceiveRetryableErrorRetriedTooManyTimes() { for (int i = 0; i < max; i++) { readFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); } + final AtomicLong retryCounter = new AtomicLong(); + // before each retry, we assert the fetch failures; after the last retry, the fetch failure should persist + beforeSendShardChangesRequest = status -> { + assertThat(status.numberOfFailedFetches(), equalTo(retryCounter.get())); + if (retryCounter.get() > 0) { + assertThat(status.fetchExceptions().entrySet(), hasSize(1)); + final Map.Entry entry = status.fetchExceptions().entrySet().iterator().next(); + assertThat(entry.getKey(), equalTo(0L)); + assertThat(entry.getValue(), instanceOf(ElasticsearchException.class)); + assertNotNull(entry.getValue().getCause()); + assertThat(entry.getValue().getCause(), instanceOf(ShardNotFoundException.class)); + final ShardNotFoundException cause = (ShardNotFoundException) entry.getValue().getCause(); + assertThat(cause.getShardId().getIndexName(), equalTo("leader_index")); + assertThat(cause.getShardId().getId(), equalTo(0)); + } + retryCounter.incrementAndGet(); + }; task.coordinateReads(); assertThat(shardChangesRequests.size(), equalTo(11)); @@ -202,12 +255,22 @@ public void testReceiveRetryableErrorRetriedTooManyTimes() { assertThat(shardChangesRequest[1], equalTo(64L)); } - assertThat(task.isStopped(), equalTo(true)); + assertTrue("task is stopped", task.isStopped()); assertThat(fatalError, notNullValue()); assertThat(fatalError.getMessage(), containsString("retrying failed [")); ShardFollowNodeTask.Status status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.numberOfFailedFetches(), equalTo(11L)); + assertThat(status.fetchExceptions().entrySet(), hasSize(1)); + final Map.Entry entry = status.fetchExceptions().entrySet().iterator().next(); + assertThat(entry.getKey(), equalTo(0L)); + assertThat(entry.getValue(), instanceOf(ElasticsearchException.class)); + assertNotNull(entry.getValue().getCause()); + assertThat(entry.getValue().getCause(), instanceOf(ShardNotFoundException.class)); + final ShardNotFoundException cause = 
(ShardNotFoundException) entry.getValue().getCause(); + assertThat(cause.getShardId().getIndexName(), equalTo("leader_index")); + assertThat(cause.getShardId().getId(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } @@ -216,19 +279,38 @@ public void testReceiveNonRetryableError() { ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); startTask(task, 63, -1); - Exception failure = new RuntimeException(); + Exception failure = new RuntimeException("replication failed"); readFailures.add(failure); + final AtomicBoolean invoked = new AtomicBoolean(); + // since there will be only one failure, this should only be invoked once and there should not be a fetch failure + beforeSendShardChangesRequest = status -> { + if (invoked.compareAndSet(false, true)) { + assertThat(status.numberOfFailedFetches(), equalTo(0L)); + assertThat(status.fetchExceptions().entrySet(), hasSize(0)); + } else { + fail("invoked twice"); + } + }; task.coordinateReads(); assertThat(shardChangesRequests.size(), equalTo(1)); assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); - assertThat(task.isStopped(), equalTo(true)); + assertTrue("task is stopped", task.isStopped()); assertThat(fatalError, sameInstance(failure)); ShardFollowNodeTask.Status status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.numberOfFailedFetches(), equalTo(1L)); + assertThat(status.fetchExceptions().entrySet(), hasSize(1)); + final Map.Entry entry = status.fetchExceptions().entrySet().iterator().next(); + assertThat(entry.getKey(), equalTo(0L)); + assertThat(entry.getValue(), instanceOf(ElasticsearchException.class)); + assertNotNull(entry.getValue().getCause()); + assertThat(entry.getValue().getCause(), instanceOf(RuntimeException.class)); + final RuntimeException cause = (RuntimeException) entry.getValue().getCause(); + assertThat(cause.getMessage(), equalTo("replication failed")); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } @@ -245,7 +327,7 @@ public void testHandleReadResponse() { assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); ShardFollowNodeTask.Status status = task.getStatus(); - assertThat(status.indexMetadataVersion(), equalTo(0L)); + assertThat(status.mappingVersion(), equalTo(0L)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); @@ -351,7 +433,7 @@ public void testMappingUpdate() { ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); startTask(task, 63, -1); - imdVersions.add(1L); + mappingVersions.add(1L); task.coordinateReads(); ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 1L, 63L); task.handleReadResponse(0L, 63L, response); @@ -360,7 +442,7 @@ public void testMappingUpdate() { assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); ShardFollowNodeTask.Status status = task.getStatus(); - assertThat(status.indexMetadataVersion(), equalTo(1L)); + assertThat(status.mappingVersion(), equalTo(1L)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); 
assertThat(status.numberOfConcurrentWrites(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -376,7 +458,7 @@ public void testMappingUpdateRetryableError() { for (int i = 0; i < max; i++) { mappingUpdateFailures.add(new ConnectException()); } - imdVersions.add(1L); + mappingVersions.add(1L); task.coordinateReads(); ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 1L, 63L); task.handleReadResponse(0L, 63L, response); @@ -385,7 +467,7 @@ public void testMappingUpdateRetryableError() { assertThat(bulkShardOperationRequests.size(), equalTo(1)); assertThat(task.isStopped(), equalTo(false)); ShardFollowNodeTask.Status status = task.getStatus(); - assertThat(status.indexMetadataVersion(), equalTo(1L)); + assertThat(status.mappingVersion(), equalTo(1L)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -401,17 +483,17 @@ public void testMappingUpdateRetryableErrorRetriedTooManyTimes() { for (int i = 0; i < max; i++) { mappingUpdateFailures.add(new ConnectException()); } - imdVersions.add(1L); + mappingVersions.add(1L); task.coordinateReads(); ShardChangesAction.Response response = generateShardChangesResponse(0, 64, 1L, 64L); task.handleReadResponse(0L, 64L, response); assertThat(mappingUpdateFailures.size(), equalTo(max - 11)); - assertThat(imdVersions.size(), equalTo(1)); + assertThat(mappingVersions.size(), equalTo(1)); assertThat(bulkShardOperationRequests.size(), equalTo(0)); assertThat(task.isStopped(), equalTo(true)); ShardFollowNodeTask.Status status = task.getStatus(); - assertThat(status.indexMetadataVersion(), equalTo(0L)); + assertThat(status.mappingVersion(), equalTo(0L)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -430,7 +512,7 @@ public void testMappingUpdateNonRetryableError() { assertThat(bulkShardOperationRequests.size(), equalTo(0)); assertThat(task.isStopped(), equalTo(true)); ShardFollowNodeTask.Status status = task.getStatus(); - assertThat(status.indexMetadataVersion(), equalTo(0L)); + assertThat(status.mappingVersion(), equalTo(0L)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -641,8 +723,10 @@ ShardFollowNodeTask createShardFollowTask(int maxBatchOperationCount, int maxCon readFailures = new LinkedList<>(); writeFailures = new LinkedList<>(); mappingUpdateFailures = new LinkedList<>(); - imdVersions = new LinkedList<>(); + mappingVersions = new LinkedList<>(); + leaderGlobalCheckpoints = new LinkedList<>(); followerGlobalCheckpoints = new LinkedList<>(); + maxSeqNos = new LinkedList<>(); return new ShardFollowNodeTask( 1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), params, scheduler, System::nanoTime) { @@ -654,9 +738,9 @@ protected void innerUpdateMapping(LongConsumer handler, Consumer erro return; } - Long imdVersion = imdVersions.poll(); - if (imdVersion != null) { - handler.accept(imdVersion); + final Long mappingVersion = mappingVersions.poll(); + if (mappingVersion != null) { + handler.accept(mappingVersion); } } @@ -683,10 +767,23 @@ protected void innerSendBulkShardOperationsRequest( @Override protected void innerSendShardChangesRequest(long from, int requestBatchSize, Consumer handler, Consumer 
errorHandler) { + beforeSendShardChangesRequest.accept(getStatus()); shardChangesRequests.add(new long[]{from, requestBatchSize}); Exception readFailure = ShardFollowNodeTaskTests.this.readFailures.poll(); if (readFailure != null) { errorHandler.accept(readFailure); + } else if (simulateResponse.get()) { + final Translog.Operation[] operations = new Translog.Operation[requestBatchSize]; + for (int i = 0; i < requestBatchSize; i++) { + operations[i] = new Translog.NoOp(from + i, 0, "test"); + } + final ShardChangesAction.Response response = + new ShardChangesAction.Response( + mappingVersions.poll(), + leaderGlobalCheckpoints.poll(), + maxSeqNos.poll(), + operations); + handler.accept(response); } } @@ -708,7 +805,7 @@ public void markAsFailed(Exception e) { }; } - private static ShardChangesAction.Response generateShardChangesResponse(long fromSeqNo, long toSeqNo, long imdVersion, + private static ShardChangesAction.Response generateShardChangesResponse(long fromSeqNo, long toSeqNo, long mappingVersion, long leaderGlobalCheckPoint) { List ops = new ArrayList<>(); for (long seqNo = fromSeqNo; seqNo <= toSeqNo; seqNo++) { @@ -717,7 +814,7 @@ private static ShardChangesAction.Response generateShardChangesResponse(long fro ops.add(new Translog.Index("doc", id, seqNo, 0, source)); } return new ShardChangesAction.Response( - imdVersion, leaderGlobalCheckPoint, leaderGlobalCheckPoint, ops.toArray(new Translog.Operation[0])); + mappingVersion, leaderGlobalCheckPoint, leaderGlobalCheckPoint, ops.toArray(new Translog.Operation[0])); } void startTask(ShardFollowNodeTask task, long leaderGlobalCheckpoint, long followerGlobalCheckpoint) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 32cc876125701..ec180943a3b5b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -209,7 +209,7 @@ protected void innerSendShardChangesRequest(long from, int maxOperationCount, Co try { Translog.Operation[] ops = ShardChangesAction.getOperations(indexShard, seqNoStats.getGlobalCheckpoint(), from, maxOperationCount, params.getMaxBatchSizeInBytes()); - // Hard code index metadata version, this is ok, as mapping updates are not tested here. 
+ // hard code mapping version; this is ok, as mapping updates are not tested here final ShardChangesAction.Response response = new ShardChangesAction.Response(1L, seqNoStats.getGlobalCheckpoint(), seqNoStats.getMaxSeqNo(), ops); handler.accept(response); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java index fa06abe65ba6a..4c6c0c060e45a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java @@ -61,7 +61,7 @@ public void testPrimaryTermFromFollower() throws IOException { final TransportWriteAction.WritePrimaryResult result = TransportBulkShardOperationsAction.shardOperationOnPrimary(followerPrimary.shardId(), operations, followerPrimary, logger); - try (Translog.Snapshot snapshot = followerPrimary.newTranslogSnapshotFromMinSeqNo(0)) { + try (Translog.Snapshot snapshot = followerPrimary.getHistoryOperations("test", 0)) { assertThat(snapshot.totalOperations(), equalTo(operations.size())); Translog.Operation operation; while ((operation = snapshot.next()) != null) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index ff877724ce4b5..677b8955490da 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -280,7 +280,7 @@ private FollowingEngine createEngine(Store store, EngineConfig config) throws IO SequenceNumbers.NO_OPS_PERFORMED, shardId, 1L); store.associateIndexWithNewTranslog(translogUuid); FollowingEngine followingEngine = new FollowingEngine(config); - followingEngine.recoverFromTranslog(); + followingEngine.recoverFromTranslog(Long.MAX_VALUE); return followingEngine; } diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index a3b4bea9702f5..a58500b880f94 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -8,7 +8,6 @@ import java.nio.file.StandardCopyOption apply plugin: 'elasticsearch.esplugin' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' -apply plugin: 'com.github.johnrengelman.shadow' archivesBaseName = 'x-pack-core' @@ -27,19 +26,18 @@ dependencyLicenses { dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" - compile project(':x-pack:protocol') - shadow "org.apache.httpcomponents:httpclient:${versions.httpclient}" - shadow "org.apache.httpcomponents:httpcore:${versions.httpcore}" - shadow "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}" - shadow "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}" + compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" + compile "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}" + compile "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}" - shadow "commons-logging:commons-logging:${versions.commonslogging}" - shadow "commons-codec:commons-codec:${versions.commonscodec}" + compile "commons-logging:commons-logging:${versions.commonslogging}" 
+ compile "commons-codec:commons-codec:${versions.commonscodec}" // security deps - shadow 'com.unboundid:unboundid-ldapsdk:3.2.0' - shadow project(path: ':modules:transport-netty4', configuration: 'runtime') - shadow(project(path: ':plugins:transport-nio', configuration: 'runtime')) { + compile 'com.unboundid:unboundid-ldapsdk:3.2.0' + compile project(path: ':modules:transport-netty4', configuration: 'runtime') + compile(project(path: ':plugins:transport-nio', configuration: 'runtime')) { // TODO: core exclusion should not be necessary, since it is a transitive dep of all plugins exclude group: "org.elasticsearch", module: "elasticsearch-core" } @@ -112,8 +110,7 @@ test { // TODO: don't publish test artifacts just to run messy tests, fix the tests! // https://github.com/elastic/x-plugins/issues/724 configurations { - testArtifacts.extendsFrom(testRuntime, shadow) - testArtifacts.exclude(group: project(':x-pack:protocol').group, module: project(':x-pack:protocol').name) + testArtifacts.extendsFrom testRuntime } task testJar(type: Jar) { appendix 'test' diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index e087438924394..0619aef6961cc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -120,7 +120,7 @@ public LicenseService(Settings settings, ClusterService clusterService, Clock cl super(settings); this.clusterService = clusterService; this.clock = clock; - this.scheduler = new SchedulerEngine(clock); + this.scheduler = new SchedulerEngine(settings, clock); this.licenseState = licenseState; this.operationModeFileWatcher = new OperationModeFileWatcher(resourceWatcherService, XPackPlugin.resolveConfigFile(env, "license_mode"), logger, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java new file mode 100644 index 0000000000000..e7460d5a2eb38 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java @@ -0,0 +1,282 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.protocol.xpack.XPackInfoResponse; +import org.elasticsearch.protocol.xpack.license.LicenseStatus; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.xpack.core.action.XPackInfoAction; + +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +/** + * Checks remote clusters for license compatibility with a specified license predicate. 
+ */ +public final class RemoteClusterLicenseChecker { + + /** + * Encapsulates the license info of a remote cluster. + */ + public static final class RemoteClusterLicenseInfo { + + private final String clusterAlias; + + /** + * The alias of the remote cluster. + * + * @return the cluster alias + */ + public String clusterAlias() { + return clusterAlias; + } + + private final XPackInfoResponse.LicenseInfo licenseInfo; + + /** + * The license info of the remote cluster. + * + * @return the license info + */ + public XPackInfoResponse.LicenseInfo licenseInfo() { + return licenseInfo; + } + + RemoteClusterLicenseInfo(final String clusterAlias, final XPackInfoResponse.LicenseInfo licenseInfo) { + this.clusterAlias = clusterAlias; + this.licenseInfo = licenseInfo; + } + + } + + /** + * Encapsulates a remote cluster license check. The check is either successful if the license of the remote cluster is compatible with + * the predicate used to check license compatibility, or the check is a failure. + */ + public static final class LicenseCheck { + + private final RemoteClusterLicenseInfo remoteClusterLicenseInfo; + + /** + * The remote cluster license info. This method should only be invoked if this instance represents a failing license check. + * + * @return the remote cluster license info + */ + public RemoteClusterLicenseInfo remoteClusterLicenseInfo() { + assert isSuccess() == false; + return remoteClusterLicenseInfo; + } + + private static final LicenseCheck SUCCESS = new LicenseCheck(null); + + /** + * A successful license check. + * + * @return a successful license check instance + */ + public static LicenseCheck success() { + return SUCCESS; + } + + /** + * Test if this instance represents a successful license check. + * + * @return true if this instance represents a successful license check, otherwise false + */ + public boolean isSuccess() { + return this == SUCCESS; + } + + /** + * Creates a failing license check encapsulating the specified remote cluster license info. + * + * @param remoteClusterLicenseInfo the remote cluster license info + * @return a failing license check + */ + public static LicenseCheck failure(final RemoteClusterLicenseInfo remoteClusterLicenseInfo) { + return new LicenseCheck(remoteClusterLicenseInfo); + } + + private LicenseCheck(final RemoteClusterLicenseInfo remoteClusterLicenseInfo) { + this.remoteClusterLicenseInfo = remoteClusterLicenseInfo; + } + + } + + private final Client client; + private final Predicate predicate; + + /** + * Constructs a remote cluster license checker with the specified license predicate for checking license compatibility. The predicate + * does not need to check for the active license state as this is handled by the remote cluster license checker. + * + * @param client the client + * @param predicate the license predicate + */ + public RemoteClusterLicenseChecker(final Client client, final Predicate predicate) { + this.client = client; + this.predicate = predicate; + } + + public static boolean isLicensePlatinumOrTrial(final XPackInfoResponse.LicenseInfo licenseInfo) { + final License.OperationMode mode = License.OperationMode.resolve(licenseInfo.getMode()); + return mode == License.OperationMode.PLATINUM || mode == License.OperationMode.TRIAL; + } + + /** + * Checks the specified clusters for license compatibility. The specified callback will be invoked once if all clusters are + * license-compatible, otherwise the specified callback will be invoked once on the first cluster that is not license-compatible. 
+ * + * @param clusterAliases the cluster aliases to check + * @param listener a callback + */ + public void checkRemoteClusterLicenses(final List clusterAliases, final ActionListener listener) { + final Iterator clusterAliasesIterator = clusterAliases.iterator(); + if (clusterAliasesIterator.hasNext() == false) { + listener.onResponse(LicenseCheck.success()); + return; + } + + final AtomicReference clusterAlias = new AtomicReference<>(); + + final ActionListener infoListener = new ActionListener() { + + @Override + public void onResponse(final XPackInfoResponse xPackInfoResponse) { + final XPackInfoResponse.LicenseInfo licenseInfo = xPackInfoResponse.getLicenseInfo(); + if ((licenseInfo.getStatus() == LicenseStatus.ACTIVE) == false + || predicate.test(License.OperationMode.resolve(licenseInfo.getMode())) == false) { + listener.onResponse(LicenseCheck.failure(new RemoteClusterLicenseInfo(clusterAlias.get(), licenseInfo))); + return; + } + + if (clusterAliasesIterator.hasNext()) { + clusterAlias.set(clusterAliasesIterator.next()); + // recurse to the next cluster + remoteClusterLicense(clusterAlias.get(), this); + } else { + listener.onResponse(LicenseCheck.success()); + } + } + + @Override + public void onFailure(final Exception e) { + final String message = "could not determine the license type for cluster [" + clusterAlias.get() + "]"; + listener.onFailure(new ElasticsearchException(message, e)); + } + + }; + + // check the license on the first cluster, and then we recursively check licenses on the remaining clusters + clusterAlias.set(clusterAliasesIterator.next()); + remoteClusterLicense(clusterAlias.get(), infoListener); + } + + private void remoteClusterLicense(final String clusterAlias, final ActionListener listener) { + final ThreadContext threadContext = client.threadPool().getThreadContext(); + final ContextPreservingActionListener contextPreservingActionListener = + new ContextPreservingActionListener<>(threadContext.newRestorableContext(false), listener); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we stash any context here since this is an internal execution and should not leak any existing context information + threadContext.markAsSystemContext(); + + final XPackInfoRequest request = new XPackInfoRequest(); + request.setCategories(EnumSet.of(XPackInfoRequest.Category.LICENSE)); + try { + client.getRemoteClusterClient(clusterAlias).execute(XPackInfoAction.INSTANCE, request, contextPreservingActionListener); + } catch (final Exception e) { + contextPreservingActionListener.onFailure(e); + } + } + } + + /** + * Predicate to test if the index name represents the name of a remote index. + * + * @param index the index name + * @return true if the collection of indices contains a remote index, otherwise false + */ + public static boolean isRemoteIndex(final String index) { + return index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR) != -1; + } + + /** + * Predicate to test if the collection of index names contains any that represent the name of a remote index. + * + * @param indices the collection of index names + * @return true if the collection of index names contains a name that represents a remote index, otherwise false + */ + public static boolean containsRemoteIndex(final List indices) { + return indices.stream().anyMatch(RemoteClusterLicenseChecker::isRemoteIndex); + } + + /** + * Filters the collection of index names for names that represent a remote index. 
Remote index names are of the form + * {@code cluster_name:index_name}. + * + * @param indices the collection of index names + * @return list of index names that represent remote index names + */ + public static List remoteIndices(final List indices) { + return indices.stream().filter(RemoteClusterLicenseChecker::isRemoteIndex).collect(Collectors.toList()); + } + + /** + * Extract the list of remote cluster aliases from the list of index names. Remote index names are of the form + * {@code cluster_alias:index_name} and the cluster_alias is extracted for each index name that represents a remote index. + * + * @param indices the collection of index names + * @return the remote cluster names + */ + public static List remoteClusterAliases(final List indices) { + return indices.stream() + .filter(RemoteClusterLicenseChecker::isRemoteIndex) + .map(index -> index.substring(0, index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR))) + .distinct() + .collect(Collectors.toList()); + } + + /** + * Constructs an error message for license incompatibility. + * + * @param feature the name of the feature that initiated the remote cluster license check. + * @param remoteClusterLicenseInfo the remote cluster license info of the cluster that failed the license check + * @return an error message representing license incompatibility + */ + public static String buildErrorMessage( + final String feature, + final RemoteClusterLicenseInfo remoteClusterLicenseInfo, + final Predicate predicate) { + final StringBuilder error = new StringBuilder(); + if (remoteClusterLicenseInfo.licenseInfo().getStatus() != LicenseStatus.ACTIVE) { + error.append(String.format(Locale.ROOT, "the license on cluster [%s] is not active", remoteClusterLicenseInfo.clusterAlias())); + } else { + assert predicate.test(remoteClusterLicenseInfo.licenseInfo()) == false : "license must be incompatible to build error message"; + final String message = String.format( + Locale.ROOT, + "the license mode [%s] on cluster [%s] does not enable [%s]", + License.OperationMode.resolve(remoteClusterLicenseInfo.licenseInfo().getMode()), + remoteClusterLicenseInfo.clusterAlias(), + feature); + error.append(message); + } + + return error.toString(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 722c9d0e711aa..37176803d4fdf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -410,10 +410,20 @@ public AllowedRealmType allowedRealmType() { */ public boolean isCustomRoleProvidersAllowed() { final Status localStatus = status; - return (localStatus.mode == OperationMode.PLATINUM || localStatus.mode == OperationMode.TRIAL ) + return (localStatus.mode == OperationMode.PLATINUM || localStatus.mode == OperationMode.TRIAL) && localStatus.active; } + /** + * @return whether "authorization_realms" are allowed based on the license {@link OperationMode} + * @see org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings + */ + public boolean isAuthorizationRealmAllowed() { + final Status localStatus = status; + return (localStatus.mode == OperationMode.PLATINUM || localStatus.mode == OperationMode.TRIAL) + && localStatus.active; + } + /** * Determine if Watcher is available based on the current license. *

    @@ -514,13 +524,13 @@ public boolean isGraphAllowed() { * {@code false}. */ public boolean isMachineLearningAllowed() { - // status is volatile - Status localStatus = status; - OperationMode operationMode = localStatus.mode; - - boolean licensed = operationMode == OperationMode.TRIAL || operationMode == OperationMode.PLATINUM; + // one-time volatile read as status could be updated on us while performing this check + final Status currentStatus = status; + return currentStatus.active && isMachineLearningAllowedForOperationMode(currentStatus.mode); + } - return licensed && localStatus.active; + public static boolean isMachineLearningAllowedForOperationMode(final OperationMode operationMode) { + return isPlatinumOrTrialOperationMode(operationMode); } /** @@ -612,4 +622,30 @@ public boolean isSecurityEnabled() { final OperationMode mode = status.mode; return mode == OperationMode.TRIAL ? (isSecurityExplicitlyEnabled || isSecurityEnabledByTrialVersion) : isSecurityEnabled; } + + /** + * Determine if cross-cluster replication should be enabled. + *

+ * Cross-cluster replication is only disabled when the license has expired or if the mode is not:
+ * <ul>
+ * <li>{@link OperationMode#PLATINUM}</li>
+ * <li>{@link OperationMode#TRIAL}</li>
+ * </ul>
    + * + * @return true is the license is compatible, otherwise false + */ + public boolean isCcrAllowed() { + // one-time volatile read as status could be updated on us while performing this check + final Status currentStatus = status; + return currentStatus.active && isCcrAllowedForOperationMode(currentStatus.mode); + } + + public static boolean isCcrAllowedForOperationMode(final OperationMode operationMode) { + return isPlatinumOrTrialOperationMode(operationMode); + } + + public static boolean isPlatinumOrTrialOperationMode(final OperationMode operationMode) { + return operationMode == OperationMode.PLATINUM || operationMode == OperationMode.TRIAL; + } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java new file mode 100644 index 0000000000000..41f066daf93d3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Locale; + +/** + * Fetch information about X-Pack from the cluster. + */ +public class XPackInfoRequest extends ActionRequest { + + public enum Category { + BUILD, LICENSE, FEATURES; + + public static EnumSet toSet(String... 
categories) { + EnumSet set = EnumSet.noneOf(Category.class); + for (String category : categories) { + switch (category) { + case "_all": + return EnumSet.allOf(Category.class); + case "_none": + return EnumSet.noneOf(Category.class); + default: + set.add(Category.valueOf(category.toUpperCase(Locale.ROOT))); + } + } + return set; + } + } + + private boolean verbose; + private EnumSet categories = EnumSet.noneOf(Category.class); + + public XPackInfoRequest() {} + + public void setVerbose(boolean verbose) { + this.verbose = verbose; + } + + public boolean isVerbose() { + return verbose; + } + + public void setCategories(EnumSet categories) { + this.categories = categories; + } + + public EnumSet getCategories() { + return categories; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + this.verbose = in.readBoolean(); + EnumSet categories = EnumSet.noneOf(Category.class); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + categories.add(Category.valueOf(in.readString())); + } + this.categories = categories; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(verbose); + out.writeVInt(categories.size()); + for (Category category : categories) { + out.writeString(category.name()); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java new file mode 100644 index 0000000000000..b51a451a67faa --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java @@ -0,0 +1,483 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.license.LicenseStatus; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class XPackInfoResponse extends ActionResponse implements ToXContentObject { + /** + * Value of the license's expiration time if it should never expire. 
+ */ + public static final long BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS = Long.MAX_VALUE - TimeUnit.HOURS.toMillis(24 * 365); + // TODO move this constant to License.java once we move License.java to the protocol jar + + @Nullable private BuildInfo buildInfo; + @Nullable private LicenseInfo licenseInfo; + @Nullable private FeatureSetsInfo featureSetsInfo; + + public XPackInfoResponse() {} + + public XPackInfoResponse(@Nullable BuildInfo buildInfo, @Nullable LicenseInfo licenseInfo, @Nullable FeatureSetsInfo featureSetsInfo) { + this.buildInfo = buildInfo; + this.licenseInfo = licenseInfo; + this.featureSetsInfo = featureSetsInfo; + } + + /** + * @return The build info (incl. build hash and timestamp) + */ + public BuildInfo getBuildInfo() { + return buildInfo; + } + + /** + * @return The current license info (incl. UID, type/mode. status and expiry date). May return {@code null} when no + * license is currently installed. + */ + public LicenseInfo getLicenseInfo() { + return licenseInfo; + } + + /** + * @return The current status of the feature sets in X-Pack. Feature sets describe the features available/enabled in X-Pack. + */ + public FeatureSetsInfo getFeatureSetsInfo() { + return featureSetsInfo; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalWriteable(buildInfo); + out.writeOptionalWriteable(licenseInfo); + out.writeOptionalWriteable(featureSetsInfo); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + this.buildInfo = in.readOptionalWriteable(BuildInfo::new); + this.licenseInfo = in.readOptionalWriteable(LicenseInfo::new); + this.featureSetsInfo = in.readOptionalWriteable(FeatureSetsInfo::new); + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + XPackInfoResponse rhs = (XPackInfoResponse) other; + return Objects.equals(buildInfo, rhs.buildInfo) + && Objects.equals(licenseInfo, rhs.licenseInfo) + && Objects.equals(featureSetsInfo, rhs.featureSetsInfo); + } + + @Override + public int hashCode() { + return Objects.hash(buildInfo, licenseInfo, featureSetsInfo); + } + + @Override + public String toString() { + return Strings.toString(this, true, false); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "xpack_info_response", true, (a, v) -> { + BuildInfo buildInfo = (BuildInfo) a[0]; + LicenseInfo licenseInfo = (LicenseInfo) a[1]; + @SuppressWarnings("unchecked") // This is how constructing object parser works + List featureSets = (List) a[2]; + FeatureSetsInfo featureSetsInfo = featureSets == null ? null : new FeatureSetsInfo(new HashSet<>(featureSets)); + return new XPackInfoResponse(buildInfo, licenseInfo, featureSetsInfo); + }); + static { + PARSER.declareObject(optionalConstructorArg(), BuildInfo.PARSER, new ParseField("build")); + /* + * licenseInfo is sort of "double optional" because it is + * optional but it can also be send as `null`. 
+ */ + PARSER.declareField(optionalConstructorArg(), (p, v) -> { + if (p.currentToken() == XContentParser.Token.VALUE_NULL) { + return null; + } + return LicenseInfo.PARSER.parse(p, v); + }, + new ParseField("license"), ValueType.OBJECT_OR_NULL); + PARSER.declareNamedObjects(optionalConstructorArg(), + (p, c, name) -> FeatureSetsInfo.FeatureSet.PARSER.parse(p, name), + new ParseField("features")); + } + public static XPackInfoResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (buildInfo != null) { + builder.field("build", buildInfo, params); + } + + EnumSet categories = XPackInfoRequest.Category + .toSet(Strings.splitStringByCommaToArray(params.param("categories", "_all"))); + if (licenseInfo != null) { + builder.field("license", licenseInfo, params); + } else if (categories.contains(XPackInfoRequest.Category.LICENSE)) { + // if the user requested the license info, and there is no license, we should send + // back an explicit null value (indicating there is no license). This is different + // than not adding the license info at all + builder.nullField("license"); + } + + if (featureSetsInfo != null) { + builder.field("features", featureSetsInfo, params); + } + + if (params.paramAsBoolean("human", true)) { + builder.field("tagline", "You know, for X"); + } + + return builder.endObject(); + } + + public static class LicenseInfo implements ToXContentObject, Writeable { + private final String uid; + private final String type; + private final String mode; + private final LicenseStatus status; + private final long expiryDate; + + public LicenseInfo(String uid, String type, String mode, LicenseStatus status, long expiryDate) { + this.uid = uid; + this.type = type; + this.mode = mode; + this.status = status; + this.expiryDate = expiryDate; + } + + public LicenseInfo(StreamInput in) throws IOException { + this(in.readString(), in.readString(), in.readString(), LicenseStatus.readFrom(in), in.readLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(uid); + out.writeString(type); + out.writeString(mode); + status.writeTo(out); + out.writeLong(expiryDate); + } + + public String getUid() { + return uid; + } + + public String getType() { + return type; + } + + public String getMode() { + return mode; + } + + public long getExpiryDate() { + return expiryDate; + } + + public LicenseStatus getStatus() { + return status; + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + LicenseInfo rhs = (LicenseInfo) other; + return Objects.equals(uid, rhs.uid) + && Objects.equals(type, rhs.type) + && Objects.equals(mode, rhs.mode) + && Objects.equals(status, rhs.status) + && expiryDate == rhs.expiryDate; + } + + @Override + public int hashCode() { + return Objects.hash(uid, type, mode, status, expiryDate); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "license_info", true, (a, v) -> { + String uid = (String) a[0]; + String type = (String) a[1]; + String mode = (String) a[2]; + LicenseStatus status = LicenseStatus.fromString((String) a[3]); + Long expiryDate = (Long) a[4]; + long primitiveExpiryDate = expiryDate == null ? 
BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS : expiryDate; + return new LicenseInfo(uid, type, mode, status, primitiveExpiryDate); + }); + static { + PARSER.declareString(constructorArg(), new ParseField("uid")); + PARSER.declareString(constructorArg(), new ParseField("type")); + PARSER.declareString(constructorArg(), new ParseField("mode")); + PARSER.declareString(constructorArg(), new ParseField("status")); + PARSER.declareLong(optionalConstructorArg(), new ParseField("expiry_date_in_millis")); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject() + .field("uid", uid) + .field("type", type) + .field("mode", mode) + .field("status", status.label()); + if (expiryDate != BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS) { + builder.timeField("expiry_date_in_millis", "expiry_date", expiryDate); + } + return builder.endObject(); + } + } + + public static class BuildInfo implements ToXContentObject, Writeable { + private final String hash; + private final String timestamp; + + public BuildInfo(String hash, String timestamp) { + this.hash = hash; + this.timestamp = timestamp; + } + + public BuildInfo(StreamInput input) throws IOException { + this(input.readString(), input.readString()); + } + + @Override + public void writeTo(StreamOutput output) throws IOException { + output.writeString(hash); + output.writeString(timestamp); + } + + public String getHash() { + return hash; + } + + public String getTimestamp() { + return timestamp; + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + BuildInfo rhs = (BuildInfo) other; + return Objects.equals(hash, rhs.hash) + && Objects.equals(timestamp, rhs.timestamp); + } + + @Override + public int hashCode() { + return Objects.hash(hash, timestamp); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "build_info", true, (a, v) -> new BuildInfo((String) a[0], (String) a[1])); + static { + PARSER.declareString(constructorArg(), new ParseField("hash")); + PARSER.declareString(constructorArg(), new ParseField("date")); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("hash", hash) + .field("date", timestamp) + .endObject(); + } + } + + public static class FeatureSetsInfo implements ToXContentObject, Writeable { + private final Map featureSets; + + public FeatureSetsInfo(Set featureSets) { + Map map = new HashMap<>(featureSets.size()); + for (FeatureSet featureSet : featureSets) { + map.put(featureSet.name, featureSet); + } + this.featureSets = Collections.unmodifiableMap(map); + } + + public FeatureSetsInfo(StreamInput in) throws IOException { + int size = in.readVInt(); + Map featureSets = new HashMap<>(size); + for (int i = 0; i < size; i++) { + FeatureSet featureSet = new FeatureSet(in); + featureSets.put(featureSet.name, featureSet); + } + this.featureSets = Collections.unmodifiableMap(featureSets); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(featureSets.size()); + for (FeatureSet featureSet : featureSets.values()) { + featureSet.writeTo(out); + } + } + + public Map getFeatureSets() { + return featureSets; + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + 
FeatureSetsInfo rhs = (FeatureSetsInfo) other; + return Objects.equals(featureSets, rhs.featureSets); + } + + @Override + public int hashCode() { + return Objects.hash(featureSets); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + List names = new ArrayList<>(this.featureSets.keySet()).stream().sorted().collect(Collectors.toList()); + for (String name : names) { + builder.field(name, featureSets.get(name), params); + } + return builder.endObject(); + } + + public static class FeatureSet implements ToXContentObject, Writeable { + private final String name; + @Nullable private final String description; + private final boolean available; + private final boolean enabled; + @Nullable private final Map nativeCodeInfo; + + public FeatureSet(String name, @Nullable String description, boolean available, boolean enabled, + @Nullable Map nativeCodeInfo) { + this.name = name; + this.description = description; + this.available = available; + this.enabled = enabled; + this.nativeCodeInfo = nativeCodeInfo; + } + + public FeatureSet(StreamInput in) throws IOException { + this(in.readString(), in.readOptionalString(), in.readBoolean(), in.readBoolean(), in.readMap()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeOptionalString(description); + out.writeBoolean(available); + out.writeBoolean(enabled); + out.writeMap(nativeCodeInfo); + } + + public String name() { + return name; + } + + @Nullable + public String description() { + return description; + } + + public boolean available() { + return available; + } + + public boolean enabled() { + return enabled; + } + + @Nullable + public Map nativeCodeInfo() { + return nativeCodeInfo; + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + FeatureSet rhs = (FeatureSet) other; + return Objects.equals(name, rhs.name) + && Objects.equals(description, rhs.description) + && available == rhs.available + && enabled == rhs.enabled + && Objects.equals(nativeCodeInfo, rhs.nativeCodeInfo); + } + + @Override + public int hashCode() { + return Objects.hash(name, description, available, enabled, nativeCodeInfo); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "feature_set", true, (a, name) -> { + String description = (String) a[0]; + boolean available = (Boolean) a[1]; + boolean enabled = (Boolean) a[2]; + @SuppressWarnings("unchecked") // Matches up with declaration below + Map nativeCodeInfo = (Map) a[3]; + return new FeatureSet(name, description, available, enabled, nativeCodeInfo); + }); + static { + PARSER.declareString(optionalConstructorArg(), new ParseField("description")); + PARSER.declareBoolean(constructorArg(), new ParseField("available")); + PARSER.declareBoolean(constructorArg(), new ParseField("enabled")); + PARSER.declareObject(optionalConstructorArg(), (p, name) -> p.map(), new ParseField("native_code_info")); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (description != null) { + builder.field("description", description); + } + builder.field("available", available); + builder.field("enabled", enabled); + if (nativeCodeInfo != null) { + builder.field("native_code_info", nativeCodeInfo); + } + return builder.endObject(); + } + } + } +} diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java new file mode 100644 index 0000000000000..83621a9ac3d41 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeRequest; + +public class XPackUsageRequest extends MasterNodeRequest { + + @Override + public ActionRequestValidationException validate() { + return null; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageResponse.java new file mode 100644 index 0000000000000..ccf681837fdcd --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageResponse.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Response object from calling the xpack usage api. + * + * Usage information for each feature is accessible through {@link #getUsages()}. + */ +public class XPackUsageResponse { + + private final Map> usages; + + private XPackUsageResponse(Map> usages) throws IOException { + this.usages = usages; + } + + @SuppressWarnings("unchecked") + private static Map castMap(Object value) { + return (Map)value; + } + + /** Return a map from feature name to usage information for that feature. */ + public Map> getUsages() { + return usages; + } + + public static XPackUsageResponse fromXContent(XContentParser parser) throws IOException { + Map rawMap = parser.map(); + Map> usages = rawMap.entrySet().stream().collect( + Collectors.toMap(Map.Entry::getKey, e -> castMap(e.getValue()))); + return new XPackUsageResponse(usages); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/common/ProtocolUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/common/ProtocolUtils.java new file mode 100644 index 0000000000000..3934095512120 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/common/ProtocolUtils.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.common; + +import java.util.Arrays; +import java.util.Map; + +/** + * Common utilities used for XPack protocol classes + */ +public final class ProtocolUtils { + + /** + * Implements equals for a map of string arrays + * + * The map of string arrays is used in some XPack protocol classes but does't work with equal. + */ + public static boolean equals(Map a, Map b) { + if (a == null) { + return b == null; + } + if (b == null) { + return false; + } + if (a.size() != b.size()) { + return false; + } + for (Map.Entry entry : a.entrySet()) { + String[] val = entry.getValue(); + String key = entry.getKey(); + if (val == null) { + if (b.get(key) != null || b.containsKey(key) == false) { + return false; + } + } else { + if (Arrays.equals(val, b.get(key)) == false) { + return false; + } + } + } + return true; + } + + /** + * Implements hashCode for map of string arrays + * + * The map of string arrays does't work with hashCode. + */ + public static int hashCode(Map a) { + int hash = 0; + for (Map.Entry entry : a.entrySet()) + hash += Arrays.hashCode(entry.getValue()); + return hash; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Connection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java similarity index 51% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Connection.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java index f3d9289644918..994c7e2c2d5a3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Connection.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java @@ -3,18 +3,25 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import com.carrotsearch.hppc.ObjectIntHashMap; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContent.Params; -import org.elasticsearch.xpack.core.graph.action.Vertex.VertexId; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; import java.io.IOException; +import java.util.List; import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; /** * A Connection links exactly two {@link Vertex} objects. The basis of a @@ -23,10 +30,10 @@ * as a weight. 
*/ public class Connection { - Vertex from; - Vertex to; - double weight; - long docCount; + private Vertex from; + private Vertex to; + private double weight; + private long docCount; public Connection(Vertex from, Vertex to, double weight, long docCount) { this.from = from; @@ -35,7 +42,7 @@ public Connection(Vertex from, Vertex to, double weight, long docCount) { this.docCount = docCount; } - void readFrom(StreamInput in, Map vertices) throws IOException { + public Connection(StreamInput in, Map vertices) throws IOException { from = vertices.get(new VertexId(in.readString(), in.readString())); to = vertices.get(new VertexId(in.readString(), in.readString())); weight = in.readDouble(); @@ -80,13 +87,81 @@ public double getWeight() { public long getDocCount() { return docCount; } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Connection other = (Connection) obj; + return docCount == other.docCount && + weight == other.weight && + Objects.equals(to, other.to) && + Objects.equals(from, other.from); + } + + @Override + public int hashCode() { + return Objects.hash(docCount, weight, from, to); + } + + + private static final ParseField SOURCE = new ParseField("source"); + private static final ParseField TARGET = new ParseField("target"); + private static final ParseField WEIGHT = new ParseField("weight"); + private static final ParseField DOC_COUNT = new ParseField("doc_count"); + void toXContent(XContentBuilder builder, Params params, ObjectIntHashMap vertexNumbers) throws IOException { - builder.field("source", vertexNumbers.get(from)); - builder.field("target", vertexNumbers.get(to)); - builder.field("weight", weight); - builder.field("doc_count", docCount); + builder.field(SOURCE.getPreferredName(), vertexNumbers.get(from)); + builder.field(TARGET.getPreferredName(), vertexNumbers.get(to)); + builder.field(WEIGHT.getPreferredName(), weight); + builder.field(DOC_COUNT.getPreferredName(), docCount); + } + + //When deserializing from XContent we need to wait for all vertices to be loaded before + // Connection objects can be created that reference them. This class provides the interim + // state for connections. 
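The comment above describes a two-phase parse: connections arrive from XContent as integer vertex offsets and can only become `Connection` objects once the full vertex list has been read. A plain-Java sketch of that resolve-after-parse idea, with invented names and no Elasticsearch dependencies:

```java
import java.util.Arrays;
import java.util.List;

// Sketch of the "parse now, resolve later" pattern: edges are parsed as indices
// and only resolved once the complete vertex list is available.
public class TwoPhaseResolutionDemo {
    static final class UnresolvedEdge {
        final int fromIndex;
        final int toIndex;

        UnresolvedEdge(int fromIndex, int toIndex) {
            this.fromIndex = fromIndex;
            this.toIndex = toIndex;
        }

        String resolve(List<String> vertices) {
            return vertices.get(fromIndex) + " -> " + vertices.get(toIndex);
        }
    }

    public static void main(String[] args) {
        List<String> vertices = Arrays.asList("people:john", "people:sarah", "products:guitar");
        UnresolvedEdge edge = new UnresolvedEdge(0, 2);   // parsed before the vertices were known
        System.out.println(edge.resolve(vertices));       // people:john -> products:guitar
    }
}
```

`UnresolvedConnection` below plays the same role: it keeps the raw source/target indices, and `resolve(List<Vertex>)` produces the final `Connection`.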
+ static class UnresolvedConnection { + int fromIndex; + int toIndex; + double weight; + long docCount; + UnresolvedConnection(int fromIndex, int toIndex, double weight, long docCount) { + super(); + this.fromIndex = fromIndex; + this.toIndex = toIndex; + this.weight = weight; + this.docCount = docCount; + } + public Connection resolve(List vertices) { + return new Connection(vertices.get(fromIndex), vertices.get(toIndex), weight, docCount); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "ConnectionParser", true, + args -> { + int source = (Integer) args[0]; + int target = (Integer) args[1]; + double weight = (Double) args[2]; + long docCount = (Long) args[3]; + return new UnresolvedConnection(source, target, weight, docCount); + }); + + static { + PARSER.declareInt(constructorArg(), SOURCE); + PARSER.declareInt(constructorArg(), TARGET); + PARSER.declareDouble(constructorArg(), WEIGHT); + PARSER.declareLong(constructorArg(), DOC_COUNT); + } + static UnresolvedConnection fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } + /** * An identifier (implements hashcode and equals) that represents a diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java similarity index 72% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequest.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java index e44f9f7603752..196982c0a35fb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -14,6 +14,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; @@ -29,7 +31,7 @@ * Holds the criteria required to guide the exploration of connected terms which * can be returned as a graph. */ -public class GraphExploreRequest extends ActionRequest implements IndicesRequest.Replaceable { +public class GraphExploreRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContentObject { public static final String NO_HOPS_ERROR_MESSAGE = "Graph explore request must have at least one hop"; public static final String NO_VERTICES_ERROR_MESSAGE = "Graph explore hop must have at least one VertexRequest"; @@ -51,8 +53,8 @@ public GraphExploreRequest() { } /** - * Constructs a new graph request to run against the provided - * indices. No indices means it will run against all indices. 
+ * Constructs a new graph request to run against the provided indices. No + * indices means it will run against all indices. */ public GraphExploreRequest(String... indices) { this.indices = indices; @@ -75,7 +77,6 @@ public String[] indices() { return this.indices; } - @Override public GraphExploreRequest indices(String... indices) { this.indices = indices; @@ -123,10 +124,14 @@ public TimeValue timeout() { } /** - * Graph exploration can be set to timeout after the given period. Search operations involved in - * each hop are limited to the remaining time available but can still overrun due to the nature - * of their "best efforts" timeout support. When a timeout occurs partial results are returned. - * @param timeout a {@link TimeValue} object which determines the maximum length of time to spend exploring + * Graph exploration can be set to timeout after the given period. Search + * operations involved in each hop are limited to the remaining time + * available but can still overrun due to the nature of their "best efforts" + * timeout support. When a timeout occurs partial results are returned. + * + * @param timeout + * a {@link TimeValue} object which determines the maximum length + * of time to spend exploring */ public GraphExploreRequest timeout(TimeValue timeout) { if (timeout == null) { @@ -153,10 +158,10 @@ public void readFrom(StreamInput in) throws IOException { sampleSize = in.readInt(); sampleDiversityField = in.readOptionalString(); maxDocsPerDiversityValue = in.readInt(); - + useSignificance = in.readBoolean(); returnDetailedInfo = in.readBoolean(); - + int numHops = in.readInt(); Hop parentHop = null; for (int i = 0; i < numHops; i++) { @@ -180,7 +185,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(sampleSize); out.writeOptionalString(sampleDiversityField); out.writeInt(maxDocsPerDiversityValue); - + out.writeBoolean(useSignificance); out.writeBoolean(returnDetailedInfo); out.writeInt(hops.size()); @@ -196,18 +201,21 @@ public String toString() { } /** - * The number of top-matching documents that are considered during each hop (default is - * {@link SamplerAggregationBuilder#DEFAULT_SHARD_SAMPLE_SIZE} - * Very small values (less than 50) may not provide sufficient weight-of-evidence to identify - * significant connections between terms. - *

    Very large values (many thousands) are not recommended with loosely defined queries (fuzzy queries or those - * with many OR clauses). - * This is because any useful signals in the best documents are diluted with irrelevant noise from low-quality matches. - * Performance is also typically better with smaller samples as there are less look-ups required for background frequencies - * of terms found in the documents + * The number of top-matching documents that are considered during each hop + * (default is {@link SamplerAggregationBuilder#DEFAULT_SHARD_SAMPLE_SIZE} + * Very small values (less than 50) may not provide sufficient + * weight-of-evidence to identify significant connections between terms. + *

    + * Very large values (many thousands) are not recommended with loosely + * defined queries (fuzzy queries or those with many OR clauses). This is + * because any useful signals in the best documents are diluted with + * irrelevant noise from low-quality matches. Performance is also typically + * better with smaller samples as there are less look-ups required for + * background frequencies of terms found in the documents *

    * - * @param maxNumberOfDocsPerHop shard-level sample size in documents + * @param maxNumberOfDocsPerHop + * shard-level sample size in documents */ public void sampleSize(int maxNumberOfDocsPerHop) { sampleSize = maxNumberOfDocsPerHop; @@ -242,10 +250,13 @@ public int maxDocsPerDiversityValue() { } /** - * Controls the choice of algorithm used to select interesting terms. The default - * value is true which means terms are selected based on significance (see the {@link SignificantTerms} - * aggregation) rather than popularity (using the {@link TermsAggregator}). - * @param value true if the significant_terms algorithm should be used. + * Controls the choice of algorithm used to select interesting terms. The + * default value is true which means terms are selected based on + * significance (see the {@link SignificantTerms} aggregation) rather than + * popularity (using the {@link TermsAggregator}). + * + * @param value + * true if the significant_terms algorithm should be used. */ public void useSignificance(boolean value) { this.useSignificance = value; @@ -254,32 +265,37 @@ public void useSignificance(boolean value) { public boolean useSignificance() { return useSignificance; } - + /** - * Return detailed information about vertex frequencies as part of JSON results - defaults to false - * @param value true if detailed information is required in JSON responses + * Return detailed information about vertex frequencies as part of JSON + * results - defaults to false + * + * @param value + * true if detailed information is required in JSON responses */ public void returnDetailedInfo(boolean value) { this.returnDetailedInfo = value; - } + } public boolean returnDetailedInfo() { return returnDetailedInfo; } - /** - * Add a stage in the graph exploration. Each hop represents a stage of - * querying elasticsearch to identify terms which can then be connnected - * to other terms in a subsequent hop. - * @param guidingQuery optional choice of query which influences which documents - * are considered in this stage - * @return a {@link Hop} object that holds settings for a stage in the graph exploration + * Add a stage in the graph exploration. Each hop represents a stage of + * querying elasticsearch to identify terms which can then be connnected to + * other terms in a subsequent hop. 
+ * + * @param guidingQuery + * optional choice of query which influences which documents are + * considered in this stage + * @return a {@link Hop} object that holds settings for a stage in the graph + * exploration */ public Hop createNextHop(QueryBuilder guidingQuery) { Hop parent = null; if (hops.size() > 0) { - parent = hops.get(hops.size() - 1); + parent = hops.get(hops.size() - 1); } Hop newHop = new Hop(parent); newHop.guidingQuery = guidingQuery; @@ -330,6 +346,43 @@ void writeTo(StreamOutput out) throws IOException { } } - + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.startObject("controls"); + { + if (sampleSize != SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE) { + builder.field("sample_size", sampleSize); + } + if (sampleDiversityField != null) { + builder.startObject("sample_diversity"); + builder.field("field", sampleDiversityField); + builder.field("max_docs_per_value", maxDocsPerDiversityValue); + builder.endObject(); + } + builder.field("use_significance", useSignificance); + if (returnDetailedInfo) { + builder.field("return_detailed_stats", returnDetailedInfo); + } + } + builder.endObject(); + + for (Hop hop : hops) { + if (hop.parentHop != null) { + builder.startObject("connections"); + } + hop.toXContent(builder, params); + } + for (Hop hop : hops) { + if (hop.parentHop != null) { + builder.endObject(); + } + } + builder.endObject(); + + return builder; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java similarity index 61% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreResponse.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java index 3d6c5f5aaca5e..12eb20617ff0f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java @@ -3,26 +3,34 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import com.carrotsearch.hppc.ObjectIntHashMap; + import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.graph.action.Connection.ConnectionId; -import org.elasticsearch.xpack.core.graph.action.Vertex.VertexId; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; +import org.elasticsearch.protocol.xpack.graph.Connection.UnresolvedConnection; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; import java.io.IOException; import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * Graph explore response holds a graph of {@link Vertex} and {@link Connection} objects @@ -100,8 +108,7 @@ public void readFrom(StreamInput in) throws IOException { connections = new HashMap<>(); for (int i = 0; i < size; i++) { - Connection e = new Connection(); - e.readFrom(in, vertices); + Connection e = new Connection(in, vertices); connections.put(e.getId(), e); } @@ -146,23 +153,19 @@ public void writeTo(StreamOutput out) throws IOException { } - static final class Fields { - static final String TOOK = "took"; - static final String TIMED_OUT = "timed_out"; - static final String INDICES = "_indices"; - static final String FAILURES = "failures"; - static final String VERTICES = "vertices"; - static final String CONNECTIONS = "connections"; - - } + private static final ParseField TOOK = new ParseField("took"); + private static final ParseField TIMED_OUT = new ParseField("timed_out"); + private static final ParseField VERTICES = new ParseField("vertices"); + private static final ParseField CONNECTIONS = new ParseField("connections"); + private static final ParseField FAILURES = new ParseField("failures"); @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(Fields.TOOK, tookInMillis); - builder.field(Fields.TIMED_OUT, timedOut); + builder.field(TOOK.getPreferredName(), tookInMillis); + builder.field(TIMED_OUT.getPreferredName(), timedOut); - builder.startArray(Fields.FAILURES); + builder.startArray(FAILURES.getPreferredName()); if (shardFailures != null) { for (ShardOperationFailedException shardFailure : shardFailures) { builder.startObject(); @@ -178,7 +181,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws extraParams.put(RETURN_DETAILED_INFO_PARAM, Boolean.toString(returnDetailedInfo)); Params extendedParams = new DelegatingMapParams(extraParams, params); - builder.startArray(Fields.VERTICES); + builder.startArray(VERTICES.getPreferredName()); for 
(Vertex vertex : vertices.values()) { builder.startObject(); vertexNumbers.put(vertex, vertexNumbers.size()); @@ -187,7 +190,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endArray(); - builder.startArray(Fields.CONNECTIONS); + builder.startArray(CONNECTIONS.getPreferredName()); for (Connection connection : connections.values()) { builder.startObject(); connection.toXContent(builder, extendedParams, vertexNumbers); @@ -198,5 +201,48 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "GraphExploreResponsenParser", true, + args -> { + GraphExploreResponse result = new GraphExploreResponse(); + result.vertices = new HashMap<>(); + result.connections = new HashMap<>(); + + result.tookInMillis = (Long) args[0]; + result.timedOut = (Boolean) args[1]; + + @SuppressWarnings("unchecked") + List vertices = (List) args[2]; + @SuppressWarnings("unchecked") + List unresolvedConnections = (List) args[3]; + @SuppressWarnings("unchecked") + List failures = (List) args[4]; + for (Vertex vertex : vertices) { + // reverse-engineer if detailed stats were requested - + // mainly here for testing framework's equality tests + result.returnDetailedInfo = result.returnDetailedInfo || vertex.getFg() > 0; + result.vertices.put(vertex.getId(), vertex); + } + for (UnresolvedConnection unresolvedConnection : unresolvedConnections) { + Connection resolvedConnection = unresolvedConnection.resolve(vertices); + result.connections.put(resolvedConnection.getId(), resolvedConnection); + } + if (failures.size() > 0) { + result.shardFailures = failures.toArray(new ShardSearchFailure[failures.size()]); + } + return result; + }); + + static { + PARSER.declareLong(constructorArg(), TOOK); + PARSER.declareBoolean(constructorArg(), TIMED_OUT); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> Vertex.fromXContent(p), VERTICES); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> UnresolvedConnection.fromXContent(p), CONNECTIONS); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.fromXContent(p), FAILURES); + } + + public static GraphExploreResponse fromXContext(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Hop.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java similarity index 85% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Hop.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java index 8ba7005f15fcf..e61403e8b37a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Hop.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java @@ -3,12 +3,14 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
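Most of the `fromXContent` methods introduced in this patch share one shape: a static, lenient `ConstructingObjectParser`, `declare*` calls in a static initializer, and `PARSER.apply(parser, null)`. A hedged sketch of that pattern for a hypothetical two-field object (`SampleDoc` and its field names are invented; only the parser API itself comes from the patch):

```java
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

// Hypothetical value class used only to illustrate the declare-then-apply pattern.
final class SampleDoc {
    final String name;
    final long count;

    SampleDoc(String name, long count) {
        this.name = name;
        this.count = count;
    }

    // Lenient parser (second argument true) so unknown fields are ignored, as in this PR.
    private static final ConstructingObjectParser<SampleDoc, Void> PARSER = new ConstructingObjectParser<>(
            "sample_doc", true,
            args -> new SampleDoc((String) args[0], (Long) args[1]));

    static {
        PARSER.declareString(constructorArg(), new ParseField("name"));
        PARSER.declareLong(constructorArg(), new ParseField("count"));
    }

    static SampleDoc fromXContent(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    }
}
```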
*/ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -41,7 +43,7 @@ *

    * */ -public class Hop { +public class Hop implements ToXContentFragment{ final Hop parentHop; List vertices = null; QueryBuilder guidingQuery = null; @@ -139,4 +141,20 @@ public int getNumberVertexRequests() { public VertexRequest getVertexRequest(int requestNumber) { return getEffectiveVertexRequests().get(requestNumber); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (guidingQuery != null) { + builder.field("query"); + guidingQuery.toXContent(builder, params); + } + if(vertices != null && vertices.size()>0) { + builder.startArray("vertices"); + for (VertexRequest vertexRequest : vertices) { + vertexRequest.toXContent(builder, params); + } + builder.endArray(); + } + return builder; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Vertex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java similarity index 65% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Vertex.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java index c85d6d7dfd6e1..f17812a6396a0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Vertex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java @@ -3,14 +3,21 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * A vertex in a graph response represents a single term (a field and value pair) @@ -27,6 +34,13 @@ public class Vertex implements ToXContentFragment { private final int depth; private final long bg; private long fg; + private static final ParseField FIELD = new ParseField("field"); + private static final ParseField TERM = new ParseField("term"); + private static final ParseField WEIGHT = new ParseField("weight"); + private static final ParseField DEPTH = new ParseField("depth"); + private static final ParseField FG = new ParseField("fg"); + private static final ParseField BG = new ParseField("bg"); + public Vertex(String field, String term, double weight, int depth, long bg, long fg) { super(); @@ -50,20 +64,72 @@ void writeTo(StreamOutput out) throws IOException { out.writeVLong(bg); out.writeVLong(fg); } + + @Override + public int hashCode() { + return Objects.hash(field, term, weight, depth, bg, fg); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Vertex other = (Vertex) obj; + return depth == other.depth && + weight == other.weight && + bg == other.bg && + fg == other.fg && + 
Objects.equals(field, other.field) && + Objects.equals(term, other.term); + + } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { boolean returnDetailedInfo = params.paramAsBoolean(GraphExploreResponse.RETURN_DETAILED_INFO_PARAM, false); - builder.field("field", field); - builder.field("term", term); - builder.field("weight", weight); - builder.field("depth", depth); + builder.field(FIELD.getPreferredName(), field); + builder.field(TERM.getPreferredName(), term); + builder.field(WEIGHT.getPreferredName(), weight); + builder.field(DEPTH.getPreferredName(), depth); if (returnDetailedInfo) { - builder.field("fg", fg); - builder.field("bg", bg); + builder.field(FG.getPreferredName(), fg); + builder.field(BG.getPreferredName(), bg); } return builder; } + + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "VertexParser", true, + args -> { + String field = (String) args[0]; + String term = (String) args[1]; + double weight = (Double) args[2]; + int depth = (Integer) args[3]; + Long optionalBg = (Long) args[4]; + Long optionalFg = (Long) args[5]; + long bg = optionalBg == null ? 0 : optionalBg; + long fg = optionalFg == null ? 0 : optionalFg; + return new Vertex(field, term, weight, depth, bg, fg); + }); + + static { + PARSER.declareString(constructorArg(), FIELD); + PARSER.declareString(constructorArg(), TERM); + PARSER.declareDouble(constructorArg(), WEIGHT); + PARSER.declareInt(constructorArg(), DEPTH); + PARSER.declareLong(optionalConstructorArg(), BG); + PARSER.declareLong(optionalConstructorArg(), FG); + } + + static Vertex fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + /** * @return a {@link VertexId} object that uniquely identifies this Vertex diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/VertexRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java similarity index 78% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/VertexRequest.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java index f7f7dec4b1722..63d2c616547d4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/VertexRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java @@ -3,11 +3,13 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest.TermBoost; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; import java.io.IOException; import java.util.HashMap; @@ -21,9 +23,10 @@ * inclusion list to filter which terms are considered. 
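Taken together, `GraphExploreRequest`, `Hop` and `VertexRequest` form a small builder-style API for describing an exploration. The sketch below shows how a client might assemble a one-hop request; `Hop#addVertexRequest(String)` and `VertexRequest#size(int)` are assumed from the server-side Graph API and are not visible in this hunk:

```java
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
import org.elasticsearch.protocol.xpack.graph.Hop;

public class GraphRequestSketch {
    public static void main(String[] args) {
        GraphExploreRequest request = new GraphExploreRequest("clicklogs");
        request.useSignificance(true);                    // significant_terms rather than raw popularity
        request.sampleSize(500);                          // shard-level sample of best-matching docs
        request.timeout(TimeValue.timeValueSeconds(5));

        // First hop: seed the exploration from a guiding query and pull back terms from a field.
        Hop hop = request.createNextHop(QueryBuilders.termQuery("query", "midi"));
        hop.addVertexRequest("product").size(10);         // assumed builder methods, mirroring the server API
    }
}
```

Serialized through the new `toXContent` implementation, such a request produces the `controls`/`vertices` body that the graph explore endpoint expects.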
* */ -public class VertexRequest { +public class VertexRequest implements ToXContentObject { private String fieldName; - private int size = 5; + private int size = DEFAULT_SIZE; + public static final int DEFAULT_SIZE = 5; private Map includes; private Set excludes; public static final int DEFAULT_MIN_DOC_COUNT = 3; @@ -195,4 +198,38 @@ public VertexRequest shardMinDocCount(int value) { return this; } + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("field", fieldName); + if (size != DEFAULT_SIZE) { + builder.field("size", size); + } + if (minDocCount != DEFAULT_MIN_DOC_COUNT) { + builder.field("min_doc_count", minDocCount); + } + if (shardMinDocCount != DEFAULT_SHARD_MIN_DOC_COUNT) { + builder.field("shard_min_doc_count", shardMinDocCount); + } + if(includes!=null) { + builder.startArray("include"); + for (TermBoost tb : includes.values()) { + builder.startObject(); + builder.field("term", tb.term); + builder.field("boost", tb.boost); + builder.endObject(); + } + builder.endArray(); + } + if(excludes!=null) { + builder.startArray("exclude"); + for (String value : excludes) { + builder.value(value); + } + builder.endArray(); + } + builder.endObject(); + return builder; + } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java new file mode 100644 index 0000000000000..5d5dd0f5ef61d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for the default distribution's Graph + * APIs. + */ +package org.elasticsearch.protocol.xpack.graph; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/DeleteLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/DeleteLicenseRequest.java new file mode 100644 index 0000000000000..62353b093b5b5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/DeleteLicenseRequest.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; + + +public class DeleteLicenseRequest extends AcknowledgedRequest { + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java new file mode 100644 index 0000000000000..926ce1d1d705b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + + +public class GetLicenseRequest extends MasterNodeReadRequest { + + public GetLicenseRequest() { + } + + public GetLicenseRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java new file mode 100644 index 0000000000000..6d5e1b5653fe7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionResponse; + +public class GetLicenseResponse extends ActionResponse { + + private String license; + + GetLicenseResponse() { + } + + public GetLicenseResponse(String license) { + this.license = license; + } + + public String getLicenseDefinition() { + return license; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicenseStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicenseStatus.java new file mode 100644 index 0000000000000..5bc66ab745e49 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicenseStatus.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import java.io.IOException; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +/** + * Status of an X-Pack license. 
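`LicenseStatus` below is a `Writeable` enum, so it round-trips over the transport wire format as its string label. A small sketch of that round trip; `BytesStreamOutput` is the standard in-memory stream from elasticsearch core and is assumed to be on the classpath:

```java
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.protocol.xpack.license.LicenseStatus;

public class LicenseStatusRoundTrip {
    public static void main(String[] args) throws Exception {
        LicenseStatus status = LicenseStatus.fromString("expired");

        // Serialize the enum as its label and read it back from the same bytes.
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            status.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                System.out.println(LicenseStatus.readFrom(in));   // EXPIRED
            }
        }
    }
}
```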
+ */ +public enum LicenseStatus implements Writeable { + + ACTIVE("active"), + INVALID("invalid"), + EXPIRED("expired"); + + private final String label; + + LicenseStatus(String label) { + this.label = label; + } + + public String label() { + return label; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(label); + } + + public static LicenseStatus readFrom(StreamInput in) throws IOException { + return fromString(in.readString()); + } + + public static LicenseStatus fromString(String value) { + switch (value) { + case "active": + return ACTIVE; + case "invalid": + return INVALID; + case "expired": + return EXPIRED; + default: + throw new IllegalArgumentException("unknown license status [" + value + "]"); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicensesStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicensesStatus.java new file mode 100644 index 0000000000000..18745653e761e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicensesStatus.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import java.util.Locale; + +public enum LicensesStatus { + VALID((byte) 0), + INVALID((byte) 1), + EXPIRED((byte) 2); + + private final byte id; + + LicensesStatus(byte id) { + this.id = id; + } + + public int id() { + return id; + } + + public static LicensesStatus fromId(int id) { + if (id == 0) { + return VALID; + } else if (id == 1) { + return INVALID; + } else if (id == 2) { + return EXPIRED; + } else { + throw new IllegalStateException("no valid LicensesStatus for id=" + id); + } + } + + + @Override + public String toString() { + return this.name().toLowerCase(Locale.ROOT); + } + + public static LicensesStatus fromString(String value) { + switch (value) { + case "valid": + return VALID; + case "invalid": + return INVALID; + case "expired": + return EXPIRED; + default: + throw new IllegalArgumentException("unknown licenses status [" + value + "]"); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseRequest.java new file mode 100644 index 0000000000000..342e6c296e7ed --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseRequest.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; + +public class PutLicenseRequest extends AcknowledgedRequest { + + private String licenseDefinition; + private boolean acknowledge = false; + + public PutLicenseRequest() { + + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public void setLicenseDefinition(String licenseDefinition) { + this.licenseDefinition = licenseDefinition; + } + + public String getLicenseDefinition() { + return licenseDefinition; + } + + public void setAcknowledge(boolean acknowledge) { + this.acknowledge = acknowledge; + } + + public boolean isAcknowledge() { + return acknowledge; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponse.java new file mode 100644 index 0000000000000..206c5a3b38366 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponse.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.common.ProtocolUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class PutLicenseResponse extends AcknowledgedResponse { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "put_license_response", true, (a, v) -> { + boolean acknowledged = (Boolean) a[0]; + LicensesStatus licensesStatus = LicensesStatus.fromString((String) a[1]); + @SuppressWarnings("unchecked") Tuple> acknowledgements = (Tuple>) a[2]; + if (acknowledgements == null) { + return new PutLicenseResponse(acknowledged, licensesStatus); + } else { + return new PutLicenseResponse(acknowledged, licensesStatus, acknowledgements.v1(), acknowledgements.v2()); + } + + }); + + static { + PARSER.declareBoolean(constructorArg(), new ParseField("acknowledged")); + PARSER.declareString(constructorArg(), new ParseField("license_status")); + PARSER.declareObject(optionalConstructorArg(), (parser, v) -> { + Map acknowledgeMessages = new HashMap<>(); + String message = null; + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != 
XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else { + if (currentFieldName == null) { + throw new XContentParseException(parser.getTokenLocation(), "expected message header or acknowledgement"); + } + if ("message".equals(currentFieldName)) { + if (token != XContentParser.Token.VALUE_STRING) { + throw new XContentParseException(parser.getTokenLocation(), "unexpected message header type"); + } + message = parser.text(); + } else { + if (token != XContentParser.Token.START_ARRAY) { + throw new XContentParseException(parser.getTokenLocation(), "unexpected acknowledgement type"); + } + List acknowledgeMessagesList = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token != XContentParser.Token.VALUE_STRING) { + throw new XContentParseException(parser.getTokenLocation(), "unexpected acknowledgement text"); + } + acknowledgeMessagesList.add(parser.text()); + } + acknowledgeMessages.put(currentFieldName, acknowledgeMessagesList.toArray(new String[0])); + } + } + } + return new Tuple<>(message, acknowledgeMessages); + }, + new ParseField("acknowledge")); + } + + private LicensesStatus status; + private Map acknowledgeMessages; + private String acknowledgeHeader; + + public PutLicenseResponse() { + } + + public PutLicenseResponse(boolean acknowledged, LicensesStatus status) { + this(acknowledged, status, null, Collections.emptyMap()); + } + + public PutLicenseResponse(boolean acknowledged, LicensesStatus status, String acknowledgeHeader, + Map acknowledgeMessages) { + super(acknowledged); + this.status = status; + this.acknowledgeHeader = acknowledgeHeader; + this.acknowledgeMessages = acknowledgeMessages; + } + + public LicensesStatus status() { + return status; + } + + public Map acknowledgeMessages() { + return acknowledgeMessages; + } + + public String acknowledgeHeader() { + return acknowledgeHeader; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + status = LicensesStatus.fromId(in.readVInt()); + acknowledgeHeader = in.readOptionalString(); + int size = in.readVInt(); + Map acknowledgeMessages = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String feature = in.readString(); + int nMessages = in.readVInt(); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = in.readString(); + } + acknowledgeMessages.put(feature, messages); + } + this.acknowledgeMessages = acknowledgeMessages; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(status.id()); + out.writeOptionalString(acknowledgeHeader); + out.writeVInt(acknowledgeMessages.size()); + for (Map.Entry entry : acknowledgeMessages.entrySet()) { + out.writeString(entry.getKey()); + out.writeVInt(entry.getValue().length); + for (String message : entry.getValue()) { + out.writeString(message); + } + } + } + + @Override + protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { + builder.field("license_status", status.toString()); + if (!acknowledgeMessages.isEmpty()) { + builder.startObject("acknowledge"); + builder.field("message", acknowledgeHeader); + for (Map.Entry entry : acknowledgeMessages.entrySet()) { + builder.startArray(entry.getKey()); + for (String message : entry.getValue()) { + builder.value(message); + } + builder.endArray(); + } + builder.endObject(); + } + } + + @Override + public String 
toString() { + return Strings.toString(this, true, true); + } + + public static PutLicenseResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + PutLicenseResponse that = (PutLicenseResponse) o; + + return status == that.status && + ProtocolUtils.equals(acknowledgeMessages, that.acknowledgeMessages) && + Objects.equals(acknowledgeHeader, that.acknowledgeHeader); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), status, ProtocolUtils.hashCode(acknowledgeMessages), acknowledgeHeader); + } + + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/package-info.java new file mode 100644 index 0000000000000..a0a80a9958b95 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for the default distribution's License + * APIs. + */ +package org.elasticsearch.protocol.xpack.license; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java new file mode 100644 index 0000000000000..17afee59fa156 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.migration; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +public class IndexUpgradeInfoRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { + + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true); + + public IndexUpgradeInfoRequest(String... 
indices) { + indices(indices); + } + + public IndexUpgradeInfoRequest(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndexUpgradeInfoRequest indices(String... indices) { + this.indices = Objects.requireNonNull(indices, "indices cannot be null"); + return this; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public void indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IndexUpgradeInfoRequest request = (IndexUpgradeInfoRequest) o; + return Arrays.equals(indices, request.indices) && + Objects.equals(indicesOptions.toString(), request.indicesOptions.toString()); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices), indicesOptions.toString()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java new file mode 100644 index 0000000000000..17115ac9b1711 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.migration; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public class IndexUpgradeInfoResponse extends ActionResponse implements ToXContentObject { + + private static final ParseField INDICES = new ParseField("indices"); + private static final ParseField ACTION_REQUIRED = new ParseField("action_required"); + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("IndexUpgradeInfoResponse", + true, + (a, c) -> { + @SuppressWarnings("unchecked") + Map map = (Map)a[0]; + Map actionsRequired = map.entrySet().stream() + .filter(e -> { + if (e.getValue() instanceof Map == false) { + return false; + } + @SuppressWarnings("unchecked") + Map value =(Map)e.getValue(); + return value.containsKey(ACTION_REQUIRED.getPreferredName()); + }) + .collect(Collectors.toMap( + Map.Entry::getKey, + e -> { + @SuppressWarnings("unchecked") + Map value = (Map) e.getValue(); + return UpgradeActionRequired.fromString((String)value.get(ACTION_REQUIRED.getPreferredName())); + } + )); + return new IndexUpgradeInfoResponse(actionsRequired); + }); + + static { + PARSER.declareObject(constructorArg(), (p, c) -> p.map(), INDICES); + } + + + private Map actions; + + public IndexUpgradeInfoResponse() { + + } + + public IndexUpgradeInfoResponse(Map actions) { + this.actions = actions; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + actions = in.readMap(StreamInput::readString, UpgradeActionRequired::readFromStream); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(actions, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); + } + + public Map getActions() { + return actions; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.startObject(INDICES.getPreferredName()); + for (Map.Entry entry : actions.entrySet()) { + builder.startObject(entry.getKey()); + { + builder.field(ACTION_REQUIRED.getPreferredName(), entry.getValue().toString()); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IndexUpgradeInfoResponse response = (IndexUpgradeInfoResponse) o; + return Objects.equals(actions, response.actions); + } + + @Override + public int hashCode() { + return Objects.hash(actions); + } + + public static IndexUpgradeInfoResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/UpgradeActionRequired.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/UpgradeActionRequired.java new file mode 100644 index 0000000000000..dce1c7d18f50e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/UpgradeActionRequired.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.migration; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +/** + * Indicates the type of the upgrade required for the index + */ +public enum UpgradeActionRequired implements Writeable { + NOT_APPLICABLE, // Indicates that the check is not applicable to this index type, the next check will be performed + UP_TO_DATE, // Indicates that the check finds this index to be up to date - no additional checks are required + REINDEX, // The index should be reindexed + UPGRADE; // The index should go through the upgrade procedure + + public static UpgradeActionRequired fromString(String value) { + return UpgradeActionRequired.valueOf(value.toUpperCase(Locale.ROOT)); + } + + public static UpgradeActionRequired readFromStream(StreamInput in) throws IOException { + return in.readEnum(UpgradeActionRequired.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/package-info.java new file mode 100644 index 0000000000000..7c52f6a8fd4f1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for the default distribution's Migration + * APIs. + */ +package org.elasticsearch.protocol.xpack.migration; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/package-info.java new file mode 100644 index 0000000000000..3ed877d08cccd --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/package-info.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for miscellaneous X-Pack APIs.
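For reference, the migration-assistance body that `IndexUpgradeInfoResponse.fromXContent` accepts nests one object per index under `indices`, each carrying an `action_required` label that maps onto `UpgradeActionRequired`. A hedged parsing sketch (the index name and JSON sample are invented; the XContent factory calls are the standard ones from elasticsearch core):

```java
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoResponse;

public class UpgradeInfoParseSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative sample of the response body this parser expects.
        String json = "{\"indices\":{\"logs-2017\":{\"action_required\":\"reindex\"}}}";

        try (XContentParser parser = XContentType.JSON.xContent().createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            IndexUpgradeInfoResponse response = IndexUpgradeInfoResponse.fromXContent(parser);
            System.out.println(response.getActions());   // {logs-2017=reindex}
        }
    }
}
```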
+ */ +package org.elasticsearch.protocol.xpack; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java new file mode 100644 index 0000000000000..ce627b267f31e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for the default distribution's Security + * APIs. + */ +package org.elasticsearch.protocol.xpack.security; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java new file mode 100644 index 0000000000000..4a458b69a750d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.uid.Versions; + +import java.io.IOException; + +/** + * A delete watch request to delete an watch by name (id) + */ +public class DeleteWatchRequest extends ActionRequest { + + private String id; + private long version = Versions.MATCH_ANY; + + public DeleteWatchRequest() { + this(null); + } + + public DeleteWatchRequest(String id) { + this.id = id; + } + + /** + * @return The name of the watch to be deleted + */ + public String getId() { + return id; + } + + /** + * Sets the name of the watch to be deleted + */ + public void setId(String id) { + this.id = id; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (id == null){ + validationException = ValidateActions.addValidationError("watch id is missing", validationException); + } else if (PutWatchRequest.isValidId(id) == false) { + validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + version = in.readLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeLong(version); + } + + @Override + public String toString() { + return "delete [" + id + "]"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java new file mode 100644 index 0000000000000..39cd5e966fa12 --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class DeleteWatchResponse extends ActionResponse implements ToXContentObject { + + private static final ObjectParser PARSER + = new ObjectParser<>("x_pack_delete_watch_response", DeleteWatchResponse::new); + static { + PARSER.declareString(DeleteWatchResponse::setId, new ParseField("_id")); + PARSER.declareLong(DeleteWatchResponse::setVersion, new ParseField("_version")); + PARSER.declareBoolean(DeleteWatchResponse::setFound, new ParseField("found")); + } + + private String id; + private long version; + private boolean found; + + public DeleteWatchResponse() { + } + + public DeleteWatchResponse(String id, long version, boolean found) { + this.id = id; + this.version = version; + this.found = found; + } + + public String getId() { + return id; + } + + public long getVersion() { + return version; + } + + public boolean isFound() { + return found; + } + + private void setId(String id) { + this.id = id; + } + + private void setVersion(long version) { + this.version = version; + } + + private void setFound(boolean found) { + this.found = found; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + DeleteWatchResponse that = (DeleteWatchResponse) o; + + return Objects.equals(id, that.id) && Objects.equals(version, that.version) && Objects.equals(found, that.found); + } + + @Override + public int hashCode() { + return Objects.hash(id, version, found); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + version = in.readVLong(); + found = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeVLong(version); + out.writeBoolean(found); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("_id", id) + .field("_version", version) + .field("found", found) + .endObject(); + } + + public static DeleteWatchResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java new file mode 100644 index 0000000000000..7997d853db37a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch 
B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.util.regex.Pattern; + +/** + * This request class contains the data needed to create a watch along with the name of the watch. + * The name of the watch will become the ID of the indexed document. + */ +public final class PutWatchRequest extends ActionRequest { + + private static final Pattern NO_WS_PATTERN = Pattern.compile("\\S+"); + + private String id; + private BytesReference source; + private XContentType xContentType = XContentType.JSON; + private boolean active = true; + private long version = Versions.MATCH_ANY; + + public PutWatchRequest() {} + + public PutWatchRequest(StreamInput in) throws IOException { + readFrom(in); + } + + public PutWatchRequest(String id, BytesReference source, XContentType xContentType) { + this.id = id; + this.source = source; + this.xContentType = xContentType; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + source = in.readBytesReference(); + active = in.readBoolean(); + xContentType = in.readEnum(XContentType.class); + version = in.readZLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeBytesReference(source); + out.writeBoolean(active); + out.writeEnum(xContentType); + out.writeZLong(version); + } + + /** + * @return The name that will be the ID of the indexed document + */ + public String getId() { + return id; + } + + /** + * Set the watch name + */ + public void setId(String id) { + this.id = id; + } + + /** + * @return The source of the watch + */ + public BytesReference getSource() { + return source; + } + + /** + * Set the source of the watch + */ + public void setSource(BytesReference source, XContentType xContentType) { + this.source = source; + this.xContentType = xContentType; + } + + /** + * @return The initial active state of the watch (defaults to {@code true}, e.g. 
"active") + */ + public boolean isActive() { + return active; + } + + /** + * Sets the initial active state of the watch + */ + public void setActive(boolean active) { + this.active = active; + } + + /** + * Get the content type for the source + */ + public XContentType xContentType() { + return xContentType; + } + + public long getVersion() { + return version; + } + + public void setVersion(long version) { + this.version = version; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (id == null) { + validationException = ValidateActions.addValidationError("watch id is missing", validationException); + } else if (isValidId(id) == false) { + validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException); + } + if (source == null) { + validationException = ValidateActions.addValidationError("watch source is missing", validationException); + } + if (xContentType == null) { + validationException = ValidateActions.addValidationError("request body is missing", validationException); + } + return validationException; + } + + public static boolean isValidId(String id) { + return Strings.isEmpty(id) == false && NO_WS_PATTERN.matcher(id).matches(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java new file mode 100644 index 0000000000000..f6e55ff555339 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class PutWatchResponse extends ActionResponse implements ToXContentObject { + + private static final ObjectParser PARSER + = new ObjectParser<>("x_pack_put_watch_response", PutWatchResponse::new); + static { + PARSER.declareString(PutWatchResponse::setId, new ParseField("_id")); + PARSER.declareLong(PutWatchResponse::setVersion, new ParseField("_version")); + PARSER.declareBoolean(PutWatchResponse::setCreated, new ParseField("created")); + } + + private String id; + private long version; + private boolean created; + + public PutWatchResponse() { + } + + public PutWatchResponse(String id, long version, boolean created) { + this.id = id; + this.version = version; + this.created = created; + } + + private void setId(String id) { + this.id = id; + } + + private void setVersion(long version) { + this.version = version; + } + + private void setCreated(boolean created) { + this.created = created; + } + + public String getId() { + return id; + } + + public long getVersion() { + return version; + } + + public boolean isCreated() { + return created; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + PutWatchResponse that = (PutWatchResponse) o; + + return Objects.equals(id, that.id) && Objects.equals(version, that.version) && Objects.equals(created, that.created); + } + + @Override + public int hashCode() { + return Objects.hash(id, version, created); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeVLong(version); + out.writeBoolean(created); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + version = in.readVLong(); + created = in.readBoolean(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("_id", id) + .field("_version", version) + .field("created", created) + .endObject(); + } + + public static PutWatchResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/package-info.java new file mode 100644 index 0000000000000..0d9edf3b5c035 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for the default distribution's Watcher + * APIs. 
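Taken together, `PutWatchRequest`, `DeleteWatchRequest` and their responses above make up the client-side Watcher protocol: a request carries the watch id plus the watch source as bytes with an explicit `XContentType`, `validate()` rejects missing ids, ids containing whitespace and missing sources, and the responses can be rebuilt from a JSON reply with `fromXContent()`. A hedged usage sketch (the watch id, the JSON body and the class name `WatcherProtocolSketch` are invented for illustration):

```java
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;

public class WatcherProtocolSketch {
    public static void main(String[] args) {
        // Hypothetical watch definition; a real one would define trigger/input/condition/actions.
        String watchJson = "{\"trigger\":{\"schedule\":{\"interval\":\"10s\"}}}";

        PutWatchRequest put = new PutWatchRequest("my_watch_id", new BytesArray(watchJson), XContentType.JSON);
        put.setActive(false); // register the watch without scheduling it

        // validate() returns null when the request is well formed.
        ActionRequestValidationException putErrors = put.validate();
        System.out.println("put valid: " + (putErrors == null));

        // Ids with whitespace fail PutWatchRequest.isValidId(), so validate() reports an error here.
        DeleteWatchRequest delete = new DeleteWatchRequest("bad id");
        System.out.println("delete errors: " + delete.validate().validationErrors());
    }
}
```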
+ */ +package org.elasticsearch.protocol.xpack.watcher; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java index 5503eb692558b..e4fd8d0435106 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.graph.action; import org.elasticsearch.action.Action; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; public class GraphExploreAction extends Action { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java index d5e756f78a20e..37456f234648a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java @@ -11,6 +11,9 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.protocol.xpack.graph.Hop; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java new file mode 100644 index 0000000000000..ee0c0de97e0ae --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java @@ -0,0 +1,385 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexing; + +import org.apache.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicReference; + +/** + * An abstract class that builds an index incrementally. A background job can be launched using {@link #maybeTriggerAsyncJob(long)}, + * it will create the index from the source index up to the last complete bucket that is allowed to be built (based on job position). + * Only one background job can run simultaneously and {@link #onFinish()} is called when the job + * finishes. 
{@link #onFailure(Exception)} is called if the job fails with an exception and {@link #onAbort()} is called if the indexer is + * aborted while a job is running. The indexer must be started ({@link #start()} to allow a background job to run when + * {@link #maybeTriggerAsyncJob(long)} is called. {@link #stop()} can be used to stop the background job without aborting the indexer. + * + * In a nutshell this is a 2 cycle engine: 1st it sends a query, 2nd it indexes documents based on the response, sends the next query, + * indexes, queries, indexes, ... until a condition lets the engine pause until the source provides new input. + * + * @param Type that defines a job position to be defined by the implementation. + */ +public abstract class AsyncTwoPhaseIndexer { + private static final Logger logger = Logger.getLogger(AsyncTwoPhaseIndexer.class.getName()); + + private final JobStats stats; + + private final AtomicReference state; + private final AtomicReference position; + private final Executor executor; + + protected AsyncTwoPhaseIndexer(Executor executor, AtomicReference initialState, + JobPosition initialPosition, JobStats jobStats) { + this.executor = executor; + this.state = initialState; + this.position = new AtomicReference<>(initialPosition); + this.stats = jobStats; + } + + /** + * Get the current state of the indexer. + */ + public IndexerState getState() { + return state.get(); + } + + /** + * Get the current position of the indexer. + */ + public JobPosition getPosition() { + return position.get(); + } + + /** + * Get the stats of this indexer. + */ + public JobStats getStats() { + return stats; + } + + /** + * Sets the internal state to {@link IndexerState#STARTED} if the previous state + * was {@link IndexerState#STOPPED}. Setting the state to STARTED allows a job + * to run in the background when {@link #maybeTriggerAsyncJob(long)} is called. + * + * @return The new state for the indexer (STARTED, INDEXING or ABORTING if the + * job was already aborted). + */ + public synchronized IndexerState start() { + state.compareAndSet(IndexerState.STOPPED, IndexerState.STARTED); + return state.get(); + } + + /** + * Sets the internal state to {@link IndexerState#STOPPING} if an async job is + * running in the background and in such case {@link #onFinish()} will be called + * as soon as the background job detects that the indexer is stopped. If there + * is no job running when this function is called, the state is directly set to + * {@link IndexerState#STOPPED} and {@link #onFinish()} will never be called. + * + * @return The new state for the indexer (STOPPED, STOPPING or ABORTING if the + * job was already aborted). + */ + public synchronized IndexerState stop() { + IndexerState currentState = state.updateAndGet(previousState -> { + if (previousState == IndexerState.INDEXING) { + return IndexerState.STOPPING; + } else if (previousState == IndexerState.STARTED) { + return IndexerState.STOPPED; + } else { + return previousState; + } + }); + return currentState; + } + + /** + * Sets the internal state to {@link IndexerState#ABORTING}. It returns false if + * an async job is running in the background and in such case {@link #onAbort} + * will be called as soon as the background job detects that the indexer is + * aborted. If there is no job running when this function is called, it returns + * true and {@link #onAbort()} will never be called. + * + * @return true if the indexer is aborted, false if a background job is running + * and abort is delayed. 
+ */ + public synchronized boolean abort() { + IndexerState prevState = state.getAndUpdate((prev) -> IndexerState.ABORTING); + return prevState == IndexerState.STOPPED || prevState == IndexerState.STARTED; + } + + /** + * Triggers a background job that builds the index asynchronously iff + * there is no other job that runs and the indexer is started + * ({@link IndexerState#STARTED}. + * + * @param now + * The current time in milliseconds (used to limit the job to + * complete buckets) + * @return true if a job has been triggered, false otherwise + */ + public synchronized boolean maybeTriggerAsyncJob(long now) { + final IndexerState currentState = state.get(); + switch (currentState) { + case INDEXING: + case STOPPING: + case ABORTING: + logger.warn("Schedule was triggered for job [" + getJobId() + "], but prior indexer is still running."); + return false; + + case STOPPED: + logger.debug("Schedule was triggered for job [" + getJobId() + "] but job is stopped. Ignoring trigger."); + return false; + + case STARTED: + logger.debug("Schedule was triggered for job [" + getJobId() + "], state: [" + currentState + "]"); + stats.incrementNumInvocations(1); + onStartJob(now); + + if (state.compareAndSet(IndexerState.STARTED, IndexerState.INDEXING)) { + // fire off the search. Note this is async, the method will return from here + executor.execute(() -> doNextSearch(buildSearchRequest(), + ActionListener.wrap(this::onSearchResponse, exc -> finishWithFailure(exc)))); + logger.debug("Beginning to index [" + getJobId() + "], state: [" + currentState + "]"); + return true; + } else { + logger.debug("Could not move from STARTED to INDEXING state because current state is [" + state.get() + "]"); + return false; + } + + default: + logger.warn("Encountered unexpected state [" + currentState + "] while indexing"); + throw new IllegalStateException("Job encountered an illegal state [" + currentState + "]"); + } + } + + /** + * Called to get the Id of the job, used for logging. + * + * @return a string with the id of the job + */ + protected abstract String getJobId(); + + /** + * Called to process a response from the 1 search request in order to turn it into a {@link IterationResult}. + * + * @param searchResponse response from the search phase. + * @return Iteration object to be passed to indexing phase. + */ + protected abstract IterationResult doProcess(SearchResponse searchResponse); + + /** + * Called to build the next search request. + * + * @return SearchRequest to be passed to the search phase. + */ + protected abstract SearchRequest buildSearchRequest(); + + /** + * Called at startup after job has been triggered using {@link #maybeTriggerAsyncJob(long)} and the + * internal state is {@link IndexerState#STARTED}. + * + * @param now The current time in milliseconds passed through from {@link #maybeTriggerAsyncJob(long)} + */ + protected abstract void onStartJob(long now); + + /** + * Executes the {@link SearchRequest} and calls nextPhase with the + * response or the exception if an error occurs. + * + * @param request + * The search request to execute + * @param nextPhase + * Listener for the next phase + */ + protected abstract void doNextSearch(SearchRequest request, ActionListener nextPhase); + + /** + * Executes the {@link BulkRequest} and calls nextPhase with the + * response or the exception if an error occurs. 
+ * + * @param request + * The bulk request to execute + * @param nextPhase + * Listener for the next phase + */ + protected abstract void doNextBulk(BulkRequest request, ActionListener nextPhase); + + /** + * Called periodically during the execution of a background job. Implementation + * should persists the state somewhere and continue the execution asynchronously + * using next. + * + * @param state + * The current state of the indexer + * @param position + * The current position of the indexer + * @param next + * Runnable for the next phase + */ + protected abstract void doSaveState(IndexerState state, JobPosition position, Runnable next); + + /** + * Called when a failure occurs in an async job causing the execution to stop. + * + * @param exc + * The exception + */ + protected abstract void onFailure(Exception exc); + + /** + * Called when a background job finishes. + */ + protected abstract void onFinish(); + + /** + * Called when a background job detects that the indexer is aborted causing the + * async execution to stop. + */ + protected abstract void onAbort(); + + private void finishWithFailure(Exception exc) { + doSaveState(finishAndSetState(), position.get(), () -> onFailure(exc)); + } + + private IndexerState finishAndSetState() { + return state.updateAndGet(prev -> { + switch (prev) { + case INDEXING: + // ready for another job + return IndexerState.STARTED; + + case STOPPING: + // must be started again + return IndexerState.STOPPED; + + case ABORTING: + // abort and exit + onAbort(); + return IndexerState.ABORTING; // This shouldn't matter, since onAbort() will kill the task first + + case STOPPED: + // No-op. Shouldn't really be possible to get here (should have to go through + // STOPPING + // first which will be handled) but is harmless to no-op and we don't want to + // throw exception here + return IndexerState.STOPPED; + + default: + // any other state is unanticipated at this point + throw new IllegalStateException("Indexer job encountered an illegal state [" + prev + "]"); + } + }); + } + + private void onSearchResponse(SearchResponse searchResponse) { + try { + if (checkState(getState()) == false) { + return; + } + if (searchResponse.getShardFailures().length != 0) { + throw new RuntimeException("Shard failures encountered while running indexer for job [" + getJobId() + "]: " + + Arrays.toString(searchResponse.getShardFailures())); + } + + stats.incrementNumPages(1); + IterationResult iterationResult = doProcess(searchResponse); + + if (iterationResult.isDone()) { + logger.debug("Finished indexing for job [" + getJobId() + "], saving state and shutting down."); + + // Change state first, then try to persist. This prevents in-progress + // STOPPING/ABORTING from + // being persisted as STARTED but then stop the job + doSaveState(finishAndSetState(), position.get(), this::onFinish); + return; + } + + final List docs = iterationResult.getToIndex(); + final BulkRequest bulkRequest = new BulkRequest(); + docs.forEach(bulkRequest::add); + + // TODO this might be a valid case, e.g. if implementation filters + assert bulkRequest.requests().size() > 0; + + doNextBulk(bulkRequest, ActionListener.wrap(bulkResponse -> { + // TODO we should check items in the response and move after accordingly to + // resume the failing buckets ? 
+ if (bulkResponse.hasFailures()) { + logger.warn("Error while attempting to bulk index documents: " + bulkResponse.buildFailureMessage()); + } + stats.incrementNumOutputDocuments(bulkResponse.getItems().length); + if (checkState(getState()) == false) { + return; + } + + JobPosition newPosition = iterationResult.getPosition(); + position.set(newPosition); + + onBulkResponse(bulkResponse, newPosition); + }, exc -> finishWithFailure(exc))); + } catch (Exception e) { + finishWithFailure(e); + } + } + + private void onBulkResponse(BulkResponse response, JobPosition position) { + try { + + ActionListener listener = ActionListener.wrap(this::onSearchResponse, this::finishWithFailure); + // TODO probably something more intelligent than every-50 is needed + if (stats.getNumPages() > 0 && stats.getNumPages() % 50 == 0) { + doSaveState(IndexerState.INDEXING, position, () -> doNextSearch(buildSearchRequest(), listener)); + } else { + doNextSearch(buildSearchRequest(), listener); + } + } catch (Exception e) { + finishWithFailure(e); + } + } + + /** + * Checks the {@link IndexerState} and returns false if the execution should be + * stopped. + */ + private boolean checkState(IndexerState currentState) { + switch (currentState) { + case INDEXING: + // normal state; + return true; + + case STOPPING: + logger.info("Indexer job encountered [" + IndexerState.STOPPING + "] state, halting indexer."); + doSaveState(finishAndSetState(), getPosition(), () -> { + }); + return false; + + case STOPPED: + return false; + + case ABORTING: + logger.info("Requested shutdown of indexer for job [" + getJobId() + "]"); + onAbort(); + return false; + + default: + // Anything other than indexing, aborting or stopping is unanticipated + logger.warn("Encountered unexpected state [" + currentState + "] while indexing"); + throw new IllegalStateException("Indexer job encountered an illegal state [" + currentState + "]"); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java new file mode 100644 index 0000000000000..2453504a5ba77 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexing; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; + +import java.io.IOException; +import java.util.Objects; + +/** + * This class holds the runtime statistics of a job. The stats are not used by any internal process + * and are only for external monitoring/reference. Statistics are not persisted with the job, so if the + * allocated task is shutdown/restarted on a different node all the stats will reset. 
+ */ +public abstract class IndexerJobStats implements ToXContentObject, Writeable { + + public static final ParseField NAME = new ParseField("job_stats"); + + protected long numPages = 0; + protected long numInputDocuments = 0; + protected long numOuputDocuments = 0; + protected long numInvocations = 0; + + public IndexerJobStats() { + } + + public IndexerJobStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations) { + this.numPages = numPages; + this.numInputDocuments = numInputDocuments; + this.numOuputDocuments = numOuputDocuments; + this.numInvocations = numInvocations; + } + + public IndexerJobStats(StreamInput in) throws IOException { + this.numPages = in.readVLong(); + this.numInputDocuments = in.readVLong(); + this.numOuputDocuments = in.readVLong(); + this.numInvocations = in.readVLong(); + } + + public long getNumPages() { + return numPages; + } + + public long getNumDocuments() { + return numInputDocuments; + } + + public long getNumInvocations() { + return numInvocations; + } + + public long getOutputDocuments() { + return numOuputDocuments; + } + + public void incrementNumPages(long n) { + assert(n >= 0); + numPages += n; + } + + public void incrementNumDocuments(long n) { + assert(n >= 0); + numInputDocuments += n; + } + + public void incrementNumInvocations(long n) { + assert(n >= 0); + numInvocations += n; + } + + public void incrementNumOutputDocuments(long n) { + assert(n >= 0); + numOuputDocuments += n; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(numPages); + out.writeVLong(numInputDocuments); + out.writeVLong(numOuputDocuments); + out.writeVLong(numInvocations); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + IndexerJobStats that = (IndexerJobStats) other; + + return Objects.equals(this.numPages, that.numPages) + && Objects.equals(this.numInputDocuments, that.numInputDocuments) + && Objects.equals(this.numOuputDocuments, that.numOuputDocuments) + && Objects.equals(this.numInvocations, that.numInvocations); + } + + @Override + public int hashCode() { + return Objects.hash(numPages, numInputDocuments, numOuputDocuments, numInvocations); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerState.java similarity index 97% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerState.java index 6e211c1df9e3e..1b6b9a943cba2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerState.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.rollup.job; +package org.elasticsearch.xpack.core.indexing; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IterationResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IterationResult.java new file mode 100644 index 0000000000000..1261daf185b48 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IterationResult.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexing; + +import org.elasticsearch.action.index.IndexRequest; + +import java.util.List; + +/** + * Result object to hold the result of 1 iteration of iterative indexing. + * Acts as an interface between the implementation and the generic indexer. + */ +public class IterationResult { + + private final boolean isDone; + private final JobPosition position; + private final List toIndex; + + /** + * Constructor for the result of 1 iteration. + * + * @param toIndex the list of requests to be indexed + * @param position the extracted, persistable position of the job required for the search phase + * @param isDone true if source is exhausted and job should go to sleep + * + * Note: toIndex.empty() != isDone due to possible filtering in the specific implementation + */ + public IterationResult(List toIndex, JobPosition position, boolean isDone) { + this.toIndex = toIndex; + this.position = position; + this.isDone = isDone; + } + + /** + * Returns true if this indexing iteration is done and job should go into sleep mode. + */ + public boolean isDone() { + return isDone; + } + + /** + * Return the position of the job, a generic to be passed to the next query construction. + * + * @return the position + */ + public JobPosition getPosition() { + return position; + } + + /** + * List of requests to be passed to bulk indexing. + * + * @return List of index requests. 
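`AsyncTwoPhaseIndexer`, `IndexerJobStats` and `IterationResult` above form a small framework: a subclass supplies the search request, turns each `SearchResponse` into an `IterationResult` of index requests, and performs the actual search and bulk calls, while the base class drives the search → process → bulk → search cycle and the STARTED/INDEXING/STOPPING state machine. Below is a minimal subclass sketch, assuming the generic signature `AsyncTwoPhaseIndexer<JobPosition, JobStats extends IndexerJobStats>` implied by the constructor parameters (the angle-bracket type parameters appear to have been stripped from this rendering of the diff); `DemoIndexer`, `DemoJobStats` and the index name are hypothetical:

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer;
import org.elasticsearch.xpack.core.indexing.IndexerJobStats;
import org.elasticsearch.xpack.core.indexing.IndexerState;
import org.elasticsearch.xpack.core.indexing.IterationResult;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicReference;

// Hypothetical concrete stats: IndexerJobStats only leaves toXContent to the subclass.
class DemoJobStats extends IndexerJobStats {
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        return builder.startObject().field("pages", getNumPages()).endObject();
    }
}

// Hypothetical indexer that uses a Map<String, Object> as its persistable job position.
class DemoIndexer extends AsyncTwoPhaseIndexer<Map<String, Object>, DemoJobStats> {

    DemoIndexer(Executor executor, AtomicReference<IndexerState> state) {
        super(executor, state, null, new DemoJobStats());
    }

    @Override
    protected String getJobId() {
        return "demo-job";
    }

    @Override
    protected SearchRequest buildSearchRequest() {
        return new SearchRequest("source-index"); // hypothetical source index
    }

    @Override
    protected IterationResult<Map<String, Object>> doProcess(SearchResponse searchResponse) {
        // A real implementation turns hits into IndexRequests; isDone == true ends the cycle.
        return new IterationResult<>(Collections.emptyList(), null, true);
    }

    @Override
    protected void onStartJob(long now) {}

    @Override
    protected void doNextSearch(SearchRequest request, ActionListener<SearchResponse> nextPhase) {
        // A real implementation would call something like client.search(request, nextPhase).
    }

    @Override
    protected void doNextBulk(BulkRequest request, ActionListener<BulkResponse> nextPhase) {
        // A real implementation would call something like client.bulk(request, nextPhase).
    }

    @Override
    protected void doSaveState(IndexerState state, Map<String, Object> position, Runnable next) {
        next.run(); // nothing to persist in this sketch
    }

    @Override
    protected void onFailure(Exception exc) {}

    @Override
    protected void onFinish() {}

    @Override
    protected void onAbort() {}
}
```

Driving the sketch through the lifecycle described in the class javadoc (a direct executor keeps everything on the calling thread):

```java
AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED);
DemoIndexer indexer = new DemoIndexer(Runnable::run, state);
indexer.start();                                           // STOPPED -> STARTED
indexer.maybeTriggerAsyncJob(System.currentTimeMillis());  // STARTED -> INDEXING, fires the first search
```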
+ */ + public List getToIndex() { + return toIndex; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index e0b71abe966db..193695ac69362 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -115,7 +115,7 @@ public Set expandDatafeedIds(String expression, boolean allowNoDatafeeds @Override public Version getMinimalSupportedVersion() { - return Version.V_5_4_0; + return Version.V_6_0_0_alpha1; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java index fb3ac55cda027..73cdbeef44259 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -72,18 +71,14 @@ public ActionRequestValidationException validate() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); datafeedId = in.readString(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - force = in.readBoolean(); - } + force = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(datafeedId); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeBoolean(force); - } + out.writeBoolean(force); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java index 933e98b80ff80..56b7ec2b52fc1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -79,18 +78,14 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, public void readFrom(StreamInput in) throws IOException { super.readFrom(in); jobId = in.readString(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - force = in.readBoolean(); - } + force = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(jobId); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeBoolean(force); - } + out.writeBoolean(force); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java index ef086b5126228..4b96a4d6b2746 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java 
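The machine-learning hunks above and below this point all make the same kind of change: stream reads and writes that used to be guarded by pre-5.5 version checks become unconditional, presumably because the minimum wire-compatible version has moved past 5.5 (compare the `MlMetadata` change to `Version.V_6_0_0_alpha1`). As a generic illustration of the version-gating idiom that is being removed, not taken from any particular class in this patch (`ExampleDoc`, the `force` field and `V_6_5_0` as the cut-over version are hypothetical choices):

```java
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

// Sketch of a field that only exists on the wire for nodes at or after a given version.
public class ExampleDoc implements Writeable {
    private final String id;
    private final boolean force;

    public ExampleDoc(StreamInput in) throws IOException {
        id = in.readString();
        if (in.getVersion().onOrAfter(Version.V_6_5_0)) {
            force = in.readBoolean();   // newer senders include the flag
        } else {
            force = false;              // default when talking to an older node
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(id);
        if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
            out.writeBoolean(force);    // only send the flag to nodes that can read it
        }
    }
}
```

Once every node a release can talk to is guaranteed to understand the field, both branches collapse to the unconditional read/write seen in the hunks here.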
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.tasks.BaseTasksResponse; @@ -127,9 +126,7 @@ public void readFrom(StreamInput in) throws IOException { start = in.readOptionalString(); end = in.readOptionalString(); advanceTime = in.readOptionalString(); - if (in.getVersion().after(Version.V_5_5_0)) { - skipTime = in.readOptionalString(); - } + skipTime = in.readOptionalString(); } @Override @@ -139,9 +136,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(start); out.writeOptionalString(end); out.writeOptionalString(advanceTime); - if (out.getVersion().after(Version.V_5_5_0)) { - out.writeOptionalString(skipTime); - } + out.writeOptionalString(skipTime); } @Override @@ -222,18 +217,14 @@ public Date getLastFinalizedBucketEnd() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); flushed = in.readBoolean(); - if (in.getVersion().after(Version.V_5_5_0)) { - lastFinalizedBucketEnd = new Date(in.readVLong()); - } + lastFinalizedBucketEnd = new Date(in.readVLong()); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(flushed); - if (out.getVersion().after(Version.V_5_5_0)) { - out.writeVLong(lastFinalizedBucketEnd.getTime()); - } + out.writeVLong(lastFinalizedBucketEnd.getTime()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java index 29b3d4bb8d557..c6c87ef0e465d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; @@ -162,7 +161,7 @@ public PageParams getPageParams() { public void setPageParams(PageParams pageParams) { if (timestamp != null) { - throw new IllegalArgumentException("Param [" + PageParams.FROM.getPreferredName() + throw new IllegalArgumentException("Param [" + PageParams.FROM.getPreferredName() + ", " + PageParams.SIZE.getPreferredName() + "] is incompatible with [" + TIMESTAMP.getPreferredName() + "]."); } this.pageParams = ExceptionsHelper.requireNonNull(pageParams, PageParams.PAGE.getPreferredName()); @@ -212,10 +211,8 @@ public void readFrom(StreamInput in) throws IOException { end = in.readOptionalString(); anomalyScore = in.readOptionalDouble(); pageParams = in.readOptionalWriteable(PageParams::new); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - sort = in.readString(); - descending = in.readBoolean(); - } + sort = in.readString(); + descending = in.readBoolean(); } @Override @@ -229,10 +226,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(end); out.writeOptionalDouble(anomalyScore); out.writeOptionalWriteable(pageParams); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeString(sort); - out.writeBoolean(descending); - } + out.writeString(sort); + 
out.writeBoolean(descending); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java index c108a983aa17b..fc38d974defff 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java @@ -168,10 +168,6 @@ public JobParams(String jobId) { public JobParams(StreamInput in) throws IOException { jobId = in.readString(); - if (in.getVersion().onOrBefore(Version.V_5_5_0)) { - // Read `ignoreDowntime` - in.readBoolean(); - } timeout = TimeValue.timeValueMillis(in.readVLong()); } @@ -199,10 +195,6 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); - if (out.getVersion().onOrBefore(Version.V_5_5_0)) { - // Write `ignoreDowntime` - true by default - out.writeBoolean(true); - } out.writeVLong(timeout.millis()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index 1034b00af0a34..cdf25438cea33 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -189,10 +189,6 @@ public DatafeedConfig(StreamInput in) throws IOException { this.scriptFields = null; } this.scrollSize = in.readOptionalVInt(); - if (in.getVersion().before(Version.V_5_5_0)) { - // read former _source field - in.readBoolean(); - } this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); if (in.getVersion().onOrAfter(Version.V_6_2_0)) { this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); @@ -290,10 +286,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } out.writeOptionalVInt(scrollSize); - if (out.getVersion().before(Version.V_5_5_0)) { - // write former _source field - out.writeBoolean(false); - } out.writeOptionalWriteable(chunkingConfig); if (out.getVersion().onOrAfter(Version.V_6_2_0)) { out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java index d894f7b339fe5..70102f27a5669 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,14 +48,6 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { DatafeedState state = this; - // STARTING & STOPPING states were introduced in v5.5. 
- if (out.getVersion().before(Version.V_5_5_0)) { - if (this == STARTING) { - state = STOPPED; - } else if (this == STOPPING) { - state = STARTED; - } - } out.writeEnum(state); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index f3748cefc51bc..d5425bdd1f469 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -122,10 +121,6 @@ public DatafeedUpdate(StreamInput in) throws IOException { this.scriptFields = null; } this.scrollSize = in.readOptionalVInt(); - if (in.getVersion().before(Version.V_5_5_0)) { - // TODO for former _source param - remove in v7.0.0 - in.readOptionalBoolean(); - } this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); } @@ -163,10 +158,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } out.writeOptionalVInt(scrollSize); - if (out.getVersion().before(Version.V_5_5_0)) { - // TODO for former _source param - remove in v7.0.0 - out.writeOptionalBoolean(null); - } out.writeOptionalWriteable(chunkingConfig); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index 135ad755359e6..9068ffda4de55 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -162,9 +162,8 @@ public AnalysisConfig(StreamInput in) throws IOException { } // BWC for removed per-partition normalization - // Version check is temporarily against the latest to satisfy CI tests - // TODO change to V_6_5_0 after successful backport to 6.x - if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + // TODO Remove in 7.0.0 + if (in.getVersion().before(Version.V_6_5_0)) { in.readBoolean(); } } @@ -197,9 +196,8 @@ public void writeTo(StreamOutput out) throws IOException { } // BWC for removed per-partition normalization - // Version check is temporarily against the latest to satisfy CI tests - // TODO change to V_6_5_0 after successful backport to 6.x - if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + // TODO Remove in 7.0.0 + if (out.getVersion().before(Version.V_6_5_0)) { out.writeBoolean(false); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java index 93aa5495c409e..b5083aeecb9ae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.ml.job.config; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.io.stream.StreamInput; @@ -248,12 +247,7 @@ public Detector(StreamInput in) throws IOException { useNull = in.readBoolean(); excludeFrequent = in.readBoolean() ? ExcludeFrequent.readFromStream(in) : null; rules = Collections.unmodifiableList(in.readList(DetectionRule::new)); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - detectorIndex = in.readInt(); - } else { - // negative means unknown, and is expected for 5.4 jobs - detectorIndex = -1; - } + detectorIndex = in.readInt(); } @Override @@ -276,9 +270,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeList(Collections.emptyList()); } - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeInt(detectorIndex); - } + out.writeInt(detectorIndex); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index 0005d16a99c94..a978612fd02e5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -214,11 +214,7 @@ private Job(String jobId, String jobType, Version jobVersion, List group public Job(StreamInput in) throws IOException { jobId = in.readString(); jobType = in.readString(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - jobVersion = in.readBoolean() ? Version.readVersion(in) : null; - } else { - jobVersion = null; - } + jobVersion = in.readBoolean() ? Version.readVersion(in) : null; if (in.getVersion().onOrAfter(Version.V_6_1_0)) { groups = Collections.unmodifiableList(in.readList(StreamInput::readString)); } else { @@ -482,13 +478,11 @@ public long earliestValidTimestamp(DataCounts dataCounts) { public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); out.writeString(jobType); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - if (jobVersion != null) { - out.writeBoolean(true); - Version.writeVersion(jobVersion, out); - } else { - out.writeBoolean(false); - } + if (jobVersion != null) { + out.writeBoolean(true); + Version.writeVersion(jobVersion, out); + } else { + out.writeBoolean(false); } if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeStringList(groups); @@ -666,9 +660,7 @@ private static void checkValueNotLessThan(long minVal, String name, Long value) */ public static Set getCompatibleJobTypes(Version nodeVersion) { Set compatibleTypes = new HashSet<>(); - if (nodeVersion.onOrAfter(Version.V_5_4_0)) { - compatibleTypes.add(ANOMALY_DETECTOR_JOB_TYPE); - } + compatibleTypes.add(ANOMALY_DETECTOR_JOB_TYPE); return compatibleTypes; } @@ -732,9 +724,7 @@ public Builder(Job job) { public Builder(StreamInput in) throws IOException { id = in.readOptionalString(); jobType = in.readString(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - jobVersion = in.readBoolean() ? Version.readVersion(in) : null; - } + jobVersion = in.readBoolean() ? 
Version.readVersion(in) : null; if (in.getVersion().onOrAfter(Version.V_6_1_0)) { groups = in.readList(StreamInput::readString); } else { @@ -921,13 +911,11 @@ public List invalidCreateTimeSettings() { public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(id); out.writeString(jobType); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - if (jobVersion != null) { - out.writeBoolean(true); - Version.writeVersion(jobVersion, out); - } else { - out.writeBoolean(false); - } + if (jobVersion != null) { + out.writeBoolean(true); + Version.writeVersion(jobVersion, out); + } else { + out.writeBoolean(false); } if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeStringList(groups); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java index e89149a062b68..948284d5e0080 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.config; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -34,10 +33,6 @@ public static JobState fromStream(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { JobState state = this; - // Pre v5.5 the OPENING state didn't exist - if (this == OPENING && out.getVersion().before(Version.V_5_5_0)) { - state = CLOSED; - } out.writeEnum(state); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index 380f540a31782..cdfd9bad7f1de 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -258,7 +258,7 @@ public Version getJobVersion() { } public boolean isAutodetectProcessUpdate() { - return modelPlotConfig != null || detectorUpdates != null; + return modelPlotConfig != null || detectorUpdates != null || groups != null; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java index ad8b24e66c643..2d9afa833c3c5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.process.autodetect.output; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -58,17 +57,13 @@ public FlushAcknowledgement(String id, Date lastFinalizedBucketEnd) { public FlushAcknowledgement(StreamInput in) throws IOException { id = in.readString(); - if (in.getVersion().after(Version.V_5_5_0)) { - lastFinalizedBucketEnd = new 
Date(in.readVLong()); - } + lastFinalizedBucketEnd = new Date(in.readVLong()); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); - if (out.getVersion().after(Version.V_5_5_0)) { - out.writeVLong(lastFinalizedBucketEnd.getTime()); - } + out.writeVLong(lastFinalizedBucketEnd.getTime()); } public String getId() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java index 03487500d8a8b..068b998dc251a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java @@ -143,7 +143,7 @@ public ModelSnapshot(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { minVersion = Version.readVersion(in); } else { - minVersion = Version.V_5_5_0; + minVersion = Version.CURRENT.minimumCompatibilityVersion(); } timestamp = in.readBoolean() ? new Date(in.readVLong()) : null; description = in.readOptionalString(); @@ -357,9 +357,8 @@ public static class Builder { private String jobId; // Stored snapshot documents created prior to 6.3.0 will have no - // value for min_version. We default it to 5.5.0 as there were - // no model changes between 5.5.0 and 6.3.0. - private Version minVersion = Version.V_5_5_0; + // value for min_version. + private Version minVersion = Version.V_6_3_0; private Date timestamp; private String description; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java index 360bcfaaeadfd..869cdcb437e1f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -163,10 +162,6 @@ public AnomalyRecord(String jobId, Date timestamp, long bucketSpan) { @SuppressWarnings("unchecked") public AnomalyRecord(StreamInput in) throws IOException { jobId = in.readString(); - // bwc for removed sequenceNum field - if (in.getVersion().before(Version.V_5_5_0)) { - in.readInt(); - } detectorIndex = in.readInt(); probability = in.readDouble(); byFieldName = in.readOptionalString(); @@ -201,10 +196,6 @@ public AnomalyRecord(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); - // bwc for removed sequenceNum field - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeInt(0); - } out.writeInt(detectorIndex); out.writeDouble(probability); out.writeOptionalString(byFieldName); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java index 8a7fe2395b4e0..8280ee9f22ef0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java @@ -137,19 +137,11 @@ public Bucket(StreamInput in) throws IOException { anomalyScore = in.readDouble(); bucketSpan = in.readLong(); initialAnomalyScore = in.readDouble(); - // bwc for recordCount - if (in.getVersion().before(Version.V_5_5_0)) { - in.readInt(); - } records = in.readList(AnomalyRecord::new); eventCount = in.readLong(); isInterim = in.readBoolean(); bucketInfluencers = in.readList(BucketInfluencer::new); processingTimeMs = in.readLong(); - // bwc for perPartitionMaxProbability - if (in.getVersion().before(Version.V_5_5_0)) { - in.readGenericValue(); - } // bwc for perPartitionNormalization if (in.getVersion().before(Version.V_6_5_0)) { in.readList(Bucket::readOldPerPartitionNormalization); @@ -171,19 +163,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeDouble(anomalyScore); out.writeLong(bucketSpan); out.writeDouble(initialAnomalyScore); - // bwc for recordCount - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeInt(0); - } out.writeList(records); out.writeLong(eventCount); out.writeBoolean(isInterim); out.writeList(bucketInfluencers); out.writeLong(processingTimeMs); - // bwc for perPartitionMaxProbability - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeGenericValue(Collections.emptyMap()); - } // bwc for perPartitionNormalization if (out.getVersion().before(Version.V_6_5_0)) { out.writeList(Collections.emptyList()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java index 8b18562ec6d1e..38d76789a2ea6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -100,10 +99,6 @@ public BucketInfluencer(StreamInput in) throws IOException { isInterim = in.readBoolean(); timestamp = new Date(in.readLong()); bucketSpan = in.readLong(); - // bwc for removed sequenceNum field - if (in.getVersion().before(Version.V_5_5_0)) { - in.readInt(); - } } @Override @@ -117,10 +112,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isInterim); out.writeLong(timestamp.getTime()); out.writeLong(bucketSpan); - // bwc for removed sequenceNum field - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeInt(0); - } } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java index 97ed643c44dd5..8ee49cb88d05f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -97,10 +96,6 @@ public 
Influencer(StreamInput in) throws IOException { influencerScore = in.readDouble(); isInterim = in.readBoolean(); bucketSpan = in.readLong(); - // bwc for removed sequenceNum field - if (in.getVersion().before(Version.V_5_5_0)) { - in.readInt(); - } } @Override @@ -114,10 +109,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeDouble(influencerScore); out.writeBoolean(isInterim); out.writeLong(bucketSpan); - // bwc for removed sequenceNum field - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeInt(0); - } } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java index c331d8b043797..9f066b6e98ec3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java @@ -109,20 +109,7 @@ public ModelPlot(String jobId, Date timestamp, long bucketSpan, int detectorInde public ModelPlot(StreamInput in) throws IOException { jobId = in.readString(); - // timestamp isn't optional in v5.5 - if (in.getVersion().before(Version.V_5_5_0)) { - if (in.readBoolean()) { - timestamp = new Date(in.readLong()); - } else { - timestamp = new Date(); - } - } else { - timestamp = new Date(in.readLong()); - } - // bwc for removed id field - if (in.getVersion().before(Version.V_5_5_0)) { - in.readOptionalString(); - } + timestamp = new Date(in.readLong()); partitionFieldName = in.readOptionalString(); partitionFieldValue = in.readOptionalString(); overFieldName = in.readOptionalString(); @@ -138,11 +125,7 @@ public ModelPlot(StreamInput in) throws IOException { } else { actual = in.readOptionalDouble(); } - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - bucketSpan = in.readLong(); - } else { - bucketSpan = 0; - } + bucketSpan = in.readLong(); if (in.getVersion().onOrAfter(Version.V_6_1_0)) { detectorIndex = in.readInt(); } else { @@ -154,20 +137,7 @@ public ModelPlot(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); - // timestamp isn't optional in v5.5 - if (out.getVersion().before(Version.V_5_5_0)) { - boolean hasTimestamp = timestamp != null; - out.writeBoolean(hasTimestamp); - if (hasTimestamp) { - out.writeLong(timestamp.getTime()); - } - } else { - out.writeLong(timestamp.getTime()); - } - // bwc for removed id field - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeOptionalString(null); - } + out.writeLong(timestamp.getTime()); out.writeOptionalString(partitionFieldName); out.writeOptionalString(partitionFieldValue); out.writeOptionalString(overFieldName); @@ -189,9 +159,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeOptionalDouble(actual); } - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeLong(bucketSpan); - } + out.writeLong(bucketSpan); if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeInt(detectorIndex); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java index 50f7931508585..7bbbf07e6dcbe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java @@ -26,8 +26,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.xpack.core.rollup.RollupField; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; import java.io.IOException; @@ -174,7 +174,14 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(JOBS.getPreferredName(), jobs); + + // XContentBuilder does not support passing the params object for Iterables + builder.field(JOBS.getPreferredName()); + builder.startArray(); + for (JobWrapper job : jobs) { + job.toXContent(builder, params); + } + builder.endArray(); builder.endObject(); return builder; } @@ -204,20 +211,20 @@ public final String toString() { public static class JobWrapper implements Writeable, ToXContentObject { private final RollupJobConfig job; - private final RollupJobStats stats; + private final RollupIndexerJobStats stats; private final RollupJobStatus status; public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, a -> new JobWrapper((RollupJobConfig) a[0], - (RollupJobStats) a[1], (RollupJobStatus)a[2])); + (RollupIndexerJobStats) a[1], (RollupJobStatus)a[2])); static { PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> RollupJobConfig.fromXContent(p, null), CONFIG); - PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupJobStats.PARSER::apply, STATS); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupIndexerJobStats.PARSER::apply, STATS); PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupJobStatus.PARSER::apply, STATUS); } - public JobWrapper(RollupJobConfig job, RollupJobStats stats, RollupJobStatus status) { + public JobWrapper(RollupJobConfig job, RollupIndexerJobStats stats, RollupJobStatus status) { this.job = job; this.stats = stats; this.status = status; @@ -225,7 +232,7 @@ public JobWrapper(RollupJobConfig job, RollupJobStats stats, RollupJobStatus sta public JobWrapper(StreamInput in) throws IOException { this.job = new RollupJobConfig(in); - this.stats = new RollupJobStats(in); + this.stats = new RollupIndexerJobStats(in); this.status = new RollupJobStatus(in); } @@ -240,7 +247,7 @@ public RollupJobConfig getJob() { return job; } - public RollupJobStats getStats() { + public RollupIndexerJobStats getStats() { return stats; } @@ -254,7 +261,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(CONFIG.getPreferredName()); job.toXContent(builder, params); builder.field(STATUS.getPreferredName(), status); - builder.field(STATS.getPreferredName(), stats); + builder.field(STATS.getPreferredName(), stats, params); builder.endObject(); return builder; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java index 1b8eb736084a8..054d08df999f3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java @@ -11,15 +11,26 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.GroupConfig; +import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.MetricConfig; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; +import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; + +import static java.util.Collections.singletonMap; /** * Represents the Rollup capabilities for a specific job on a single rollup index @@ -42,52 +53,7 @@ public RollupJobCaps(RollupJobConfig job) { jobID = job.getId(); rollupIndex = job.getRollupIndex(); indexPattern = job.getIndexPattern(); - Map dateHistoAggCap = job.getGroupConfig().getDateHistogram().toAggCap(); - String dateField = job.getGroupConfig().getDateHistogram().getField(); - RollupFieldCaps fieldCaps = fieldCapLookup.get(dateField); - if (fieldCaps == null) { - fieldCaps = new RollupFieldCaps(); - } - fieldCaps.addAgg(dateHistoAggCap); - fieldCapLookup.put(dateField, fieldCaps); - - if (job.getGroupConfig().getHistogram() != null) { - Map histoAggCap = job.getGroupConfig().getHistogram().toAggCap(); - Arrays.stream(job.getGroupConfig().getHistogram().getFields()).forEach(field -> { - RollupFieldCaps caps = fieldCapLookup.get(field); - if (caps == null) { - caps = new RollupFieldCaps(); - } - caps.addAgg(histoAggCap); - fieldCapLookup.put(field, caps); - }); - } - - if (job.getGroupConfig().getTerms() != null) { - Map histoAggCap = job.getGroupConfig().getTerms().toAggCap(); - Arrays.stream(job.getGroupConfig().getTerms().getFields()).forEach(field -> { - RollupFieldCaps caps = fieldCapLookup.get(field); - if (caps == null) { - caps = new RollupFieldCaps(); - } - caps.addAgg(histoAggCap); - fieldCapLookup.put(field, caps); - }); - } - - if (job.getMetricsConfig().size() > 0) { - job.getMetricsConfig().forEach(metricConfig -> { - List> metrics = metricConfig.toAggCap(); - metrics.forEach(m -> { - RollupFieldCaps caps = fieldCapLookup.get(metricConfig.getField()); - if (caps == null) { - caps = new RollupFieldCaps(); - } - caps.addAgg(m); - fieldCapLookup.put(metricConfig.getField(), caps); - }); - }); - } + fieldCapLookup = createRollupFieldCaps(job); } public RollupJobCaps(StreamInput in) throws IOException { @@ -149,8 +115,8 @@ public boolean equals(Object other) { RollupJobCaps that = (RollupJobCaps) other; return Objects.equals(this.jobID, that.jobID) - && Objects.equals(this.rollupIndex, that.rollupIndex) - && Objects.equals(this.fieldCapLookup, that.fieldCapLookup); + && Objects.equals(this.rollupIndex, that.rollupIndex) + && Objects.equals(this.fieldCapLookup, that.fieldCapLookup); } @Override @@ -158,6 +124,77 @@ public int hashCode() { 
return Objects.hash(jobID, rollupIndex, fieldCapLookup); } + static Map createRollupFieldCaps(final RollupJobConfig rollupJobConfig) { + final Map fieldCapLookup = new HashMap<>(); + + final GroupConfig groupConfig = rollupJobConfig.getGroupConfig(); + if (groupConfig != null) { + // Create RollupFieldCaps for the date histogram + final DateHistogramGroupConfig dateHistogram = groupConfig.getDateHistogram(); + final Map dateHistogramAggCap = new HashMap<>(); + dateHistogramAggCap.put("agg", DateHistogramAggregationBuilder.NAME); + dateHistogramAggCap.put(DateHistogramGroupConfig.INTERVAL, dateHistogram.getInterval().toString()); + if (dateHistogram.getDelay() != null) { + dateHistogramAggCap.put(DateHistogramGroupConfig.DELAY, dateHistogram.getDelay().toString()); + } + dateHistogramAggCap.put(DateHistogramGroupConfig.TIME_ZONE, dateHistogram.getTimeZone()); + + final RollupFieldCaps dateHistogramFieldCaps = new RollupFieldCaps(); + dateHistogramFieldCaps.addAgg(dateHistogramAggCap); + fieldCapLookup.put(dateHistogram.getField(), dateHistogramFieldCaps); + + // Create RollupFieldCaps for the histogram + final HistogramGroupConfig histogram = groupConfig.getHistogram(); + if (histogram != null) { + final Map histogramAggCap = new HashMap<>(); + histogramAggCap.put("agg", HistogramAggregationBuilder.NAME); + histogramAggCap.put(HistogramGroupConfig.INTERVAL, histogram.getInterval()); + for (String field : histogram.getFields()) { + RollupFieldCaps caps = fieldCapLookup.get(field); + if (caps == null) { + caps = new RollupFieldCaps(); + } + caps.addAgg(histogramAggCap); + fieldCapLookup.put(field, caps); + } + } + + // Create RollupFieldCaps for the term + final TermsGroupConfig terms = groupConfig.getTerms(); + if (terms != null) { + final Map termsAggCap = singletonMap("agg", TermsAggregationBuilder.NAME); + for (String field : terms.getFields()) { + RollupFieldCaps caps = fieldCapLookup.get(field); + if (caps == null) { + caps = new RollupFieldCaps(); + } + caps.addAgg(termsAggCap); + fieldCapLookup.put(field, caps); + } + } + } + + // Create RollupFieldCaps for the metrics + final List metricsConfig = rollupJobConfig.getMetricsConfig(); + if (metricsConfig.size() > 0) { + metricsConfig.forEach(metricConfig -> { + final List> metrics = metricConfig.getMetrics().stream() + .map(metric -> singletonMap("agg", (Object) metric)) + .collect(Collectors.toList()); + + metrics.forEach(m -> { + RollupFieldCaps caps = fieldCapLookup.get(metricConfig.getField()); + if (caps == null) { + caps = new RollupFieldCaps(); + } + caps.addAgg(m); + fieldCapLookup.put(metricConfig.getField(), caps); + }); + }); + } + return Collections.unmodifiableMap(fieldCapLookup); + } + public static class RollupFieldCaps implements Writeable, ToXContentObject { private List> aggs = new ArrayList<>(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java index 77dfa1cbbb1c3..166322b93722c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java @@ -20,17 +20,11 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import 
org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; -import org.elasticsearch.search.aggregations.bucket.composite.DateHistogramValuesSourceBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.xpack.core.rollup.RollupField; import org.joda.time.DateTimeZone; import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Objects; @@ -55,10 +49,10 @@ public class DateHistogramGroupConfig implements Writeable, ToXContentObject { static final String NAME = "date_histogram"; - private static final String INTERVAL = "interval"; + public static final String INTERVAL = "interval"; private static final String FIELD = "field"; public static final String TIME_ZONE = "time_zone"; - private static final String DELAY = "delay"; + public static final String DELAY = "delay"; private static final String DEFAULT_TIMEZONE = "UTC"; private static final ConstructingObjectParser PARSER; static { @@ -183,38 +177,6 @@ public Rounding createRounding() { return createRounding(interval.toString(), timeZone); } - /** - * This returns a set of aggregation builders which represent the configured - * set of date histograms. Used by the rollup indexer to iterate over historical data - */ - public List> toBuilders() { - DateHistogramValuesSourceBuilder vsBuilder = - new DateHistogramValuesSourceBuilder(RollupField.formatIndexerAggName(field, DateHistogramAggregationBuilder.NAME)); - vsBuilder.dateHistogramInterval(interval); - vsBuilder.field(field); - vsBuilder.timeZone(toDateTimeZone(timeZone)); - return Collections.singletonList(vsBuilder); - } - - /** - * @return A map representing this config object as a RollupCaps aggregation object - */ - public Map toAggCap() { - Map map = new HashMap<>(3); - map.put("agg", DateHistogramAggregationBuilder.NAME); - map.put(INTERVAL, interval.toString()); - if (delay != null) { - map.put(DELAY, delay.toString()); - } - map.put(TIME_ZONE, timeZone); - - return map; - } - - public Map getMetadata() { - return Collections.singletonMap(RollupField.formatMetaField(RollupField.INTERVAL), interval.toString()); - } - public void validateMappings(Map> fieldCapsResponse, ActionRequestValidationException validationException) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java index 0480050bf52f0..a22d022ee2dbb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java @@ -16,19 +16,13 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; -import org.elasticsearch.search.aggregations.bucket.composite.HistogramValuesSourceBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.xpack.core.rollup.RollupField; import java.io.IOException; import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; import 
java.util.List; import java.util.Map; import java.util.Objects; -import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -47,7 +41,7 @@ public class HistogramGroupConfig implements Writeable, ToXContentObject { static final String NAME = "histogram"; - private static final String INTERVAL = "interval"; + public static final String INTERVAL = "interval"; private static final String FIELDS = "fields"; private static final ConstructingObjectParser PARSER; static { @@ -86,39 +80,6 @@ public String[] getFields() { return fields; } - /** - * This returns a set of aggregation builders which represent the configured - * set of histograms. Used by the rollup indexer to iterate over historical data - */ - public List> toBuilders() { - if (fields.length == 0) { - return Collections.emptyList(); - } - - return Arrays.stream(fields).map(f -> { - HistogramValuesSourceBuilder vsBuilder - = new HistogramValuesSourceBuilder(RollupField.formatIndexerAggName(f, HistogramAggregationBuilder.NAME)); - vsBuilder.interval(interval); - vsBuilder.field(f); - vsBuilder.missingBucket(true); - return vsBuilder; - }).collect(Collectors.toList()); - } - - /** - * @return A map representing this config object as a RollupCaps aggregation object - */ - public Map toAggCap() { - Map map = new HashMap<>(2); - map.put("agg", HistogramAggregationBuilder.NAME); - map.put(INTERVAL, interval); - return map; - } - - public Map getMetadata() { - return Collections.singletonMap(RollupField.formatMetaField(RollupField.INTERVAL), interval); - } - public void validateMappings(Map> fieldCapsResponse, ActionRequestValidationException validationException) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java index cc673c4ed0d35..3a267e4cfa47c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java @@ -16,22 +16,12 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder; -import org.elasticsearch.search.aggregations.support.ValueType; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.xpack.core.rollup.RollupField; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -54,11 +44,11 @@ public class MetricConfig implements Writeable, ToXContentObject { // TODO: replace these with an enum - private static final ParseField MIN = new ParseField("min"); - private static final ParseField MAX = new ParseField("max"); - private static final ParseField SUM = new 
ParseField("sum"); - private static final ParseField AVG = new ParseField("avg"); - private static final ParseField VALUE_COUNT = new ParseField("value_count"); + public static final ParseField MIN = new ParseField("min"); + public static final ParseField MAX = new ParseField("max"); + public static final ParseField SUM = new ParseField("sum"); + public static final ParseField AVG = new ParseField("avg"); + public static final ParseField VALUE_COUNT = new ParseField("value_count"); static final String NAME = "metrics"; private static final String FIELD = "field"; @@ -112,53 +102,6 @@ public List getMetrics() { return metrics; } - /** - * This returns a set of aggregation builders which represent the configured - * set of metrics. Used by the rollup indexer to iterate over historical data - */ - public List toBuilders() { - if (metrics.size() == 0) { - return Collections.emptyList(); - } - - List aggs = new ArrayList<>(metrics.size()); - for (String metric : metrics) { - ValuesSourceAggregationBuilder.LeafOnly newBuilder; - if (metric.equals(MIN.getPreferredName())) { - newBuilder = new MinAggregationBuilder(RollupField.formatFieldName(field, MinAggregationBuilder.NAME, RollupField.VALUE)); - } else if (metric.equals(MAX.getPreferredName())) { - newBuilder = new MaxAggregationBuilder(RollupField.formatFieldName(field, MaxAggregationBuilder.NAME, RollupField.VALUE)); - } else if (metric.equals(AVG.getPreferredName())) { - // Avgs are sum + count - newBuilder = new SumAggregationBuilder(RollupField.formatFieldName(field, AvgAggregationBuilder.NAME, RollupField.VALUE)); - ValuesSourceAggregationBuilder.LeafOnly countBuilder - = new ValueCountAggregationBuilder( - RollupField.formatFieldName(field, AvgAggregationBuilder.NAME, RollupField.COUNT_FIELD), ValueType.NUMERIC); - countBuilder.field(field); - aggs.add(countBuilder); - } else if (metric.equals(SUM.getPreferredName())) { - newBuilder = new SumAggregationBuilder(RollupField.formatFieldName(field, SumAggregationBuilder.NAME, RollupField.VALUE)); - } else if (metric.equals(VALUE_COUNT.getPreferredName())) { - // TODO allow non-numeric value_counts. - // Hardcoding this is fine for now since the job validation guarantees that all metric fields are numerics - newBuilder = new ValueCountAggregationBuilder( - RollupField.formatFieldName(field, ValueCountAggregationBuilder.NAME, RollupField.VALUE), ValueType.NUMERIC); - } else { - throw new IllegalArgumentException("Unsupported metric type [" + metric + "]"); - } - newBuilder.field(field); - aggs.add(newBuilder); - } - return aggs; - } - - /** - * @return A map representing this config object as a RollupCaps aggregation object - */ - public List> toAggCap() { - return metrics.stream().map(metric -> Collections.singletonMap("agg", (Object)metric)).collect(Collectors.toList()); - } - public void validateMappings(Map> fieldCapsResponse, ActionRequestValidationException validationException) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java new file mode 100644 index 0000000000000..87915671b79a2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexing.IndexerJobStats; + +import java.io.IOException; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * The Rollup specialization of stats for the AsyncTwoPhaseIndexer. + * Note: instead of `documents_indexed`, this XContent show `rollups_indexed` + */ +public class RollupIndexerJobStats extends IndexerJobStats { + private static ParseField NUM_PAGES = new ParseField("pages_processed"); + private static ParseField NUM_INPUT_DOCUMENTS = new ParseField("documents_processed"); + private static ParseField NUM_OUTPUT_DOCUMENTS = new ParseField("rollups_indexed"); + private static ParseField NUM_INVOCATIONS = new ParseField("trigger_count"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(NAME.getPreferredName(), + args -> new RollupIndexerJobStats((long) args[0], (long) args[1], (long) args[2], (long) args[3])); + + static { + PARSER.declareLong(constructorArg(), NUM_PAGES); + PARSER.declareLong(constructorArg(), NUM_INPUT_DOCUMENTS); + PARSER.declareLong(constructorArg(), NUM_OUTPUT_DOCUMENTS); + PARSER.declareLong(constructorArg(), NUM_INVOCATIONS); + } + + public RollupIndexerJobStats() { + super(); + } + + public RollupIndexerJobStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations) { + super(numPages, numInputDocuments, numOuputDocuments, numInvocations); + } + + public RollupIndexerJobStats(StreamInput in) throws IOException { + super(in); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NUM_PAGES.getPreferredName(), numPages); + builder.field(NUM_INPUT_DOCUMENTS.getPreferredName(), numInputDocuments); + builder.field(NUM_OUTPUT_DOCUMENTS.getPreferredName(), numOuputDocuments); + builder.field(NUM_INVOCATIONS.getPreferredName(), numInvocations); + builder.endObject(); + return builder; + } + + public static RollupIndexerJobStats fromXContent(XContentParser parser) { + try { + return PARSER.parse(parser, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java deleted file mode 100644 index 06cfb520af552..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
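Note: the new RollupIndexerJobStats declares the same four field names as the RollupJobStats class deleted below (pages_processed, documents_processed, rollups_indexed, trigger_count), so consumers of the stats XContent should see no change in shape; only the Java type and its IndexerJobStats base class are new. A minimal usage sketch with made-up numbers, where "parser" is assumed to be an XContentParser positioned over such an object:

    // Constructor arguments are (pages, input documents, output rollups, trigger invocations).
    RollupIndexerJobStats stats = new RollupIndexerJobStats(1L, 1000L, 50L, 1L);
    // stats.toXContent(builder, params) renders roughly:
    //   {"pages_processed":1,"documents_processed":1000,"rollups_indexed":50,"trigger_count":1}
    RollupIndexerJobStats parsed = RollupIndexerJobStats.fromXContent(parser);
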
- */ -package org.elasticsearch.xpack.core.rollup.job; - -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Objects; - -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; - -/** - * This class holds the runtime statistics of a job. The stats are not used by any internal process - * and are only for external monitoring/reference. Statistics are not persisted with the job, so if the - * allocated task is shutdown/restarted on a different node all the stats will reset. - */ -public class RollupJobStats implements ToXContentObject, Writeable { - - public static final ParseField NAME = new ParseField("job_stats"); - - private static ParseField NUM_PAGES = new ParseField("pages_processed"); - private static ParseField NUM_DOCUMENTS = new ParseField("documents_processed"); - private static ParseField NUM_ROLLUPS = new ParseField("rollups_indexed"); - private static ParseField NUM_INVOCATIONS = new ParseField("trigger_count"); - - private long numPages = 0; - private long numDocuments = 0; - private long numRollups = 0; - private long numInvocations = 0; - - public static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>(NAME.getPreferredName(), - args -> new RollupJobStats((long) args[0], (long) args[1], (long) args[2], (long) args[3])); - - static { - PARSER.declareLong(constructorArg(), NUM_PAGES); - PARSER.declareLong(constructorArg(), NUM_DOCUMENTS); - PARSER.declareLong(constructorArg(), NUM_ROLLUPS); - PARSER.declareLong(constructorArg(), NUM_INVOCATIONS); - } - - public RollupJobStats() { - } - - public RollupJobStats(long numPages, long numDocuments, long numRollups, long numInvocations) { - this.numPages = numPages; - this.numDocuments = numDocuments; - this.numRollups = numRollups; - this.numInvocations = numInvocations; - } - - public RollupJobStats(StreamInput in) throws IOException { - this.numPages = in.readVLong(); - this.numDocuments = in.readVLong(); - this.numRollups = in.readVLong(); - this.numInvocations = in.readVLong(); - } - - public long getNumPages() { - return numPages; - } - - public long getNumDocuments() { - return numDocuments; - } - - public long getNumInvocations() { - return numInvocations; - } - - public long getNumRollups() { - return numRollups; - } - - public void incrementNumPages(long n) { - assert(n >= 0); - numPages += n; - } - - public void incrementNumDocuments(long n) { - assert(n >= 0); - numDocuments += n; - } - - public void incrementNumInvocations(long n) { - assert(n >= 0); - numInvocations += n; - } - - public void incrementNumRollups(long n) { - assert(n >= 0); - numRollups += n; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(numPages); - out.writeVLong(numDocuments); - out.writeVLong(numRollups); - out.writeVLong(numInvocations); - } - - public static RollupJobStats fromXContent(XContentParser parser) { - try { - return PARSER.parse(parser, null); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, 
Params params) throws IOException { - builder.startObject(); - builder.field(NUM_PAGES.getPreferredName(), numPages); - builder.field(NUM_DOCUMENTS.getPreferredName(), numDocuments); - builder.field(NUM_ROLLUPS.getPreferredName(), numRollups); - builder.field(NUM_INVOCATIONS.getPreferredName(), numInvocations); - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - - if (other == null || getClass() != other.getClass()) { - return false; - } - - RollupJobStats that = (RollupJobStats) other; - - return Objects.equals(this.numPages, that.numPages) - && Objects.equals(this.numDocuments, that.numDocuments) - && Objects.equals(this.numRollups, that.numRollups) - && Objects.equals(this.numInvocations, that.numInvocations); - } - - @Override - public int hashCode() { - return Objects.hash(numPages, numDocuments, numRollups, numInvocations); - } - -} - diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java index 640385c9c80d5..0a2f046907c80 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.indexing.IndexerState; import java.io.IOException; import java.util.HashMap; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java index 32507d57f32b0..fbc039843258e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java @@ -18,17 +18,11 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; -import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; -import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; -import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.xpack.core.rollup.RollupField; import java.io.IOException; import java.util.Arrays; -import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -80,29 +74,6 @@ public String[] getFields() { return fields; } - /** - * This returns a set of aggregation builders which represent the configured - * set of date histograms. 
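One detail worth keeping in mind from the TermsGroupConfig#toBuilders removal just below: the deleted builders set missingBucket(true), so documents without the grouped field still appear in the composite response as a null bucket. A condensed sketch of that deleted behaviour, with "customer_id" as an illustrative field name:

    // Condensed from the removed toBuilders() below; not new behaviour.
    TermsValuesSourceBuilder vsBuilder = new TermsValuesSourceBuilder(
            RollupField.formatIndexerAggName("customer_id", TermsAggregationBuilder.NAME));
    vsBuilder.field("customer_id");
    vsBuilder.missingBucket(true);   // keep documents missing the field as a null bucket
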
Used by the rollup indexer to iterate over historical data - */ - public List> toBuilders() { - return Arrays.stream(fields).map(f -> { - TermsValuesSourceBuilder vsBuilder - = new TermsValuesSourceBuilder(RollupField.formatIndexerAggName(f, TermsAggregationBuilder.NAME)); - vsBuilder.field(f); - vsBuilder.missingBucket(true); - return vsBuilder; - }).collect(Collectors.toList()); - } - - /** - * @return A map representing this config object as a RollupCaps aggregation object - */ - public Map toAggCap() { - Map map = new HashMap<>(1); - map.put("agg", TermsAggregationBuilder.NAME); - return map; - } - public void validateMappings(Map> fieldCapsResponse, ActionRequestValidationException validationException) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java index e405c5e4e007e..66a2eb358986a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java @@ -3,8 +3,14 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.core.scheduler; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -13,6 +19,7 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -88,13 +95,20 @@ public interface Schedule { } private final Map schedules = ConcurrentCollections.newConcurrentMap(); - private final ScheduledExecutorService scheduler; private final Clock clock; + private final ScheduledExecutorService scheduler; + private final Logger logger; private final List listeners = new CopyOnWriteArrayList<>(); - public SchedulerEngine(Clock clock) { - this.clock = clock; - this.scheduler = Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory("trigger_engine_scheduler")); + public SchedulerEngine(final Settings settings, final Clock clock) { + this(settings, clock, LogManager.getLogger(SchedulerEngine.class)); + } + + SchedulerEngine(final Settings settings, final Clock clock, final Logger logger) { + this.clock = Objects.requireNonNull(clock, "clock"); + this.scheduler = Executors.newScheduledThreadPool( + 1, EsExecutors.daemonThreadFactory(Objects.requireNonNull(settings, "settings"), "trigger_engine_scheduler")); + this.logger = Objects.requireNonNull(logger, "logger"); } public void register(Listener listener) { @@ -143,10 +157,15 @@ public int jobCount() { return schedules.size(); } - protected void notifyListeners(String name, long triggeredTime, long scheduledTime) { + protected void notifyListeners(final String name, final long triggeredTime, final long scheduledTime) { final Event event = new Event(name, triggeredTime, scheduledTime); - for (Listener listener : listeners) { - 
listener.triggered(event); + for (final Listener listener : listeners) { + try { + listener.triggered(event); + } catch (final Exception e) { + // do not allow exceptions to escape this method; we should continue to notify listeners and schedule the next run + logger.warn(new ParameterizedMessage("listener failed while handling triggered event [{}]", name), e); + } } } @@ -168,8 +187,20 @@ class ActiveSchedule implements Runnable { @Override public void run() { - long triggeredTime = clock.millis(); - notifyListeners(name, triggeredTime, scheduledTime); + final long triggeredTime = clock.millis(); + try { + notifyListeners(name, triggeredTime, scheduledTime); + } catch (final Throwable t) { + /* + * Allowing the throwable to escape here will lead to be it being caught in FutureTask#run and set as the outcome of this + * task; however, we never inspect the the outcomes of these scheduled tasks and so allowing the throwable to escape + * unhandled here could lead to us losing fatal errors. Instead, we rely on ExceptionsHelper#maybeDieOnAnotherThread to + * appropriately dispatch any error to the uncaught exception handler. We should never see an exception here as these do + * not escape from SchedulerEngine#notifyListeners. + */ + ExceptionsHelper.maybeDieOnAnotherThread(t); + throw t; + } scheduleNextRun(triggeredTime); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java index 8d56221d78bee..99788ac1de43a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext; import org.elasticsearch.node.Node; import org.elasticsearch.xpack.core.security.authc.Authentication; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; import java.util.Objects; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/UserSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/UserSettings.java index 536464cb3376d..7f22f90351ef5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/UserSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/UserSettings.java @@ -10,7 +10,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.xpack.core.security.authc.Authentication; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java index b8c2685d28a11..562e22a1eb925 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.io.InputStream; import 
java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Objects; @@ -34,32 +33,6 @@ public PutPrivilegesRequestBuilder(ElasticsearchClient client, PutPrivilegesActi super(client, action, new PutPrivilegesRequest()); } - /** - * Populate the put privileges request using the given source, application name and privilege name - * The source must contain a single privilege object which matches the application and privilege names. - */ - public PutPrivilegesRequestBuilder source(String applicationName, String expectedName, - BytesReference source, XContentType xContentType) - throws IOException { - Objects.requireNonNull(xContentType); - // EMPTY is ok here because we never call namedObject - try (InputStream stream = source.streamInput(); - XContentParser parser = xContentType.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { - XContentParser.Token token = parser.currentToken(); - if (token == null) { - token = parser.nextToken(); - } - if (token == XContentParser.Token.START_OBJECT) { - final ApplicationPrivilegeDescriptor privilege = parsePrivilege(parser, applicationName, expectedName); - this.request.setPrivileges(Collections.singleton(privilege)); - } else { - throw new ElasticsearchParseException("expected an object but found {} instead", token); - } - } - return this; - } - ApplicationPrivilegeDescriptor parsePrivilege(XContentParser parser, String applicationName, String privilegeName) throws IOException { ApplicationPrivilegeDescriptor privilege = ApplicationPrivilegeDescriptor.parse(parser, applicationName, privilegeName, false); checkPrivilegeName(privilege, applicationName, privilegeName); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java index 96c9c817182ff..82863a6e8d155 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java @@ -167,7 +167,7 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < indicesSize; i++) { indicesPrivileges.add(RoleDescriptor.IndicesPrivileges.createFrom(in)); } - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { applicationPrivileges = in.readList(RoleDescriptor.ApplicationResourcePrivileges::createFrom); conditionalClusterPrivileges = ConditionalClusterPrivileges.readArray(in); } @@ -185,7 +185,7 @@ public void writeTo(StreamOutput out) throws IOException { for (RoleDescriptor.IndicesPrivileges index : indicesPrivileges) { index.writeTo(out); } - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeStreamableList(applicationPrivileges); ConditionalClusterPrivileges.writeArray(out, this.conditionalClusterPrivileges); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java index 5956e1a661345..ed31f0cc020c6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java @@ -15,10 +15,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.Set; +import java.util.stream.Collectors; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -29,6 +33,37 @@ */ public final class CreateTokenRequest extends ActionRequest { + public enum GrantType { + PASSWORD("password"), + REFRESH_TOKEN("refresh_token"), + AUTHORIZATION_CODE("authorization_code"), + CLIENT_CREDENTIALS("client_credentials"); + + private final String value; + + GrantType(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + public static GrantType fromString(String grantType) { + if (grantType != null) { + for (GrantType type : values()) { + if (type.getValue().equals(grantType)) { + return type; + } + } + } + return null; + } + } + + private static final Set SUPPORTED_GRANT_TYPES = Collections.unmodifiableSet( + EnumSet.of(GrantType.PASSWORD, GrantType.REFRESH_TOKEN, GrantType.CLIENT_CREDENTIALS)); + private String grantType; private String username; private SecureString password; @@ -49,33 +84,58 @@ public CreateTokenRequest(String grantType, @Nullable String username, @Nullable @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if ("password".equals(grantType)) { - if (Strings.isNullOrEmpty(username)) { - validationException = addValidationError("username is missing", validationException); - } - if (password == null || password.getChars() == null || password.getChars().length == 0) { - validationException = addValidationError("password is missing", validationException); - } - if (refreshToken != null) { - validationException = - addValidationError("refresh_token is not supported with the password grant_type", validationException); - } - } else if ("refresh_token".equals(grantType)) { - if (username != null) { - validationException = - addValidationError("username is not supported with the refresh_token grant_type", validationException); - } - if (password != null) { - validationException = - addValidationError("password is not supported with the refresh_token grant_type", validationException); - } - if (refreshToken == null) { - validationException = addValidationError("refresh_token is missing", validationException); + GrantType type = GrantType.fromString(grantType); + if (type != null) { + switch (type) { + case PASSWORD: + if (Strings.isNullOrEmpty(username)) { + validationException = addValidationError("username is missing", validationException); + } + if (password == null || password.getChars() == null || password.getChars().length == 0) { + validationException = addValidationError("password is missing", validationException); + } + if (refreshToken != null) { + validationException = + addValidationError("refresh_token is not supported with the password grant_type", validationException); + } + break; + case REFRESH_TOKEN: + if (username != null) { + validationException = + addValidationError("username is not supported with the refresh_token grant_type", validationException); + } + if 
(password != null) { + validationException = + addValidationError("password is not supported with the refresh_token grant_type", validationException); + } + if (refreshToken == null) { + validationException = addValidationError("refresh_token is missing", validationException); + } + break; + case CLIENT_CREDENTIALS: + if (username != null) { + validationException = + addValidationError("username is not supported with the client_credentials grant_type", validationException); + } + if (password != null) { + validationException = + addValidationError("password is not supported with the client_credentials grant_type", validationException); + } + if (refreshToken != null) { + validationException = addValidationError("refresh_token is not supported with the client_credentials grant_type", + validationException); + } + break; + default: + validationException = addValidationError("grant_type only supports the values: [" + + SUPPORTED_GRANT_TYPES.stream().map(GrantType::getValue).collect(Collectors.joining(", ")) + "]", + validationException); } } else { - validationException = addValidationError("grant_type only supports the values: [password, refresh_token]", validationException); + validationException = addValidationError("grant_type only supports the values: [" + + SUPPORTED_GRANT_TYPES.stream().map(GrantType::getValue).collect(Collectors.joining(", ")) + "]", + validationException); } - return validationException; } @@ -126,6 +186,11 @@ public String getRefreshToken() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); + if (out.getVersion().before(Version.V_6_5_0) && GrantType.CLIENT_CREDENTIALS.getValue().equals(grantType)) { + throw new IllegalArgumentException("a request with the client_credentials grant_type cannot be sent to version [" + + out.getVersion() + "]"); + } + out.writeString(grantType); if (out.getVersion().onOrAfter(Version.V_6_2_0)) { out.writeOptionalString(username); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java index 1cb1029e820e0..30111a92431dc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java @@ -59,8 +59,14 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(tokenString); out.writeTimeValue(expiresIn); out.writeOptionalString(scope); - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeString(refreshToken); + if (out.getVersion().onOrAfter(Version.V_6_5_0)) { + out.writeOptionalString(refreshToken); + } else if (out.getVersion().onOrAfter(Version.V_6_2_0)) { + if (refreshToken == null) { + out.writeString(""); + } else { + out.writeString(refreshToken); + } } } @@ -70,7 +76,9 @@ public void readFrom(StreamInput in) throws IOException { tokenString = in.readString(); expiresIn = in.readTimeValue(); scope = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { + if (in.getVersion().onOrAfter(Version.V_6_5_0)) { + refreshToken = in.readOptionalString(); + } else if (in.getVersion().onOrAfter(Version.V_6_2_0)) { refreshToken = in.readString(); } } @@ -90,4 +98,20 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } return builder.endObject(); } + + @Override + public boolean 
equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CreateTokenResponse that = (CreateTokenResponse) o; + return Objects.equals(tokenString, that.tokenString) && + Objects.equals(expiresIn, that.expiresIn) && + Objects.equals(scope, that.scope) && + Objects.equals(refreshToken, that.refreshToken); + } + + @Override + public int hashCode() { + return Objects.hash(tokenString, expiresIn, scope, refreshToken); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java index c3e126b92ce11..0cf7ace1103d0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java @@ -8,7 +8,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java index f84b133d984b6..b78b81c060080 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequestBuilder.java index 05b3af41de222..d7538c2a556f0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequestBuilder.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.support.Validation; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java index 0da525b6ffc4f..666b79cfe5db7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java @@ -8,7 +8,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; import java.util.Collection; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java index dc43db0115e0a..4f5aed012cb11 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java @@ -109,7 +109,7 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < indexSize; i++) { indexPrivileges[i] = RoleDescriptor.IndicesPrivileges.createFrom(in); } - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { applicationPrivileges = in.readArray(ApplicationResourcePrivileges::createFrom, ApplicationResourcePrivileges[]::new); } } @@ -123,7 +123,7 @@ public void writeTo(StreamOutput out) throws IOException { for (RoleDescriptor.IndicesPrivileges priv : indexPrivileges) { priv.writeTo(out); } - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeArray(ApplicationResourcePrivileges::write, applicationPrivileges); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java index b0711fc1bc12f..8cd8b510c6499 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java @@ -66,7 +66,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); completeMatch = in.readBoolean(); index = readResourcePrivileges(in); - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { application = in.readMap(StreamInput::readString, HasPrivilegesResponse::readResourcePrivileges); } } @@ -87,7 +87,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(completeMatch); writeResourcePrivileges(out, index); - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeMap(application, StreamOutput::writeString, HasPrivilegesResponse::writeResourcePrivileges); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java index f37072b9cf0fc..e704259396a34 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java @@ -8,12 +8,12 @@ import 
org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; import java.io.IOException; import java.util.Map; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java index 7dc958bbef9f9..eea804d81fe9f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java @@ -20,7 +20,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.support.Validation; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java index e72df007cf63c..161d9d449990f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.xpack.core.security.user.InternalUserSerializationHelper; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; import java.util.Base64; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationResult.java index 08e01025e7e84..0f073ef4ae398 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationResult.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationResult.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.core.security.authc; import org.elasticsearch.common.Nullable; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.util.Objects; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java index ae04d474f41cb..bc8869d5d8356 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java @@ -8,8 +8,10 @@ import 
org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.util.Collections; import java.util.HashMap; @@ -146,6 +148,14 @@ public String toString() { return type + "/" + config.name; } + /** + * This is no-op in the base class, but allows realms to be aware of what other realms are configured + * + * @see DelegatedAuthorizationSettings + */ + public void initialize(Iterable realms, XPackLicenseState licenseState) { + } + /** * A factory interface to construct a security realm. */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java index c9868f448b40f..5a228133073e3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java @@ -19,6 +19,7 @@ public static boolean isReserved(String username, Settings settings) { case UsernamesField.KIBANA_NAME: case UsernamesField.LOGSTASH_NAME: case UsernamesField.BEATS_NAME: + case UsernamesField.APM_NAME: return XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings); default: return AnonymousUser.isAnonymousUsername(username, settings); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/kerberos/KerberosRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/kerberos/KerberosRealmSettings.java index 7524ef08c1e72..656632a2ec631 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/kerberos/KerberosRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/kerberos/KerberosRealmSettings.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import java.util.Set; @@ -44,7 +45,9 @@ private KerberosRealmSettings() { * @return the valid set of {@link Setting}s for a {@value #TYPE} realm */ public static Set> getSettings() { - return Sets.newHashSet(HTTP_SERVICE_KEYTAB_PATH, CACHE_TTL_SETTING, CACHE_MAX_USERS_SETTING, SETTING_KRB_DEBUG_ENABLE, - SETTING_REMOVE_REALM_NAME); + final Set> settings = Sets.newHashSet(HTTP_SERVICE_KEYTAB_PATH, CACHE_TTL_SETTING, CACHE_MAX_USERS_SETTING, + SETTING_KRB_DEBUG_ENABLE, SETTING_REMOVE_REALM_NAME); + settings.addAll(DelegatedAuthorizationSettings.getSettings()); + return settings; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java index 0bb9f195af7fc..3f79c722be3f0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapMetaDataResolverSettings; import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.security.authc.support.mapper.CompositeRoleMapperSettings; import java.util.HashSet; @@ -37,6 +38,7 @@ public static Set> getSettings(String type) { assert LDAP_TYPE.equals(type) : "type [" + type + "] is unknown. expected one of [" + AD_TYPE + ", " + LDAP_TYPE + "]"; settings.addAll(LdapSessionFactorySettings.getSettings()); settings.addAll(LdapUserSearchSessionFactorySettings.getSettings()); + settings.addAll(DelegatedAuthorizationSettings.getSettings()); } settings.addAll(LdapMetaDataResolverSettings.getSettings()); return settings; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java index a3539b30d3e57..53af4938a8ff4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java @@ -7,6 +7,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.security.authc.support.mapper.CompositeRoleMapperSettings; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; @@ -43,6 +44,7 @@ public static Set> getSettings() { settings.add(SSL_SETTINGS.truststoreAlgorithm); settings.add(SSL_SETTINGS.caPaths); + settings.addAll(DelegatedAuthorizationSettings.getSettings()); settings.addAll(CompositeRoleMapperSettings.getSettings()); return settings; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java index cf28b995127c3..e254cee124300 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.core.ssl.X509KeyPairSettings; @@ -89,6 +90,7 @@ public static Set> getSettings() { set.addAll(DN_ATTRIBUTE.settings()); set.addAll(NAME_ATTRIBUTE.settings()); set.addAll(MAIL_ATTRIBUTE.settings()); + set.addAll(DelegatedAuthorizationSettings.getSettings()); return set; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java index ceb93dc4c853c..a93476bbdc8da 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java @@ -14,6 +14,7 @@ // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +import org.elasticsearch.common.CharArrays; import org.elasticsearch.common.settings.SecureString; import java.security.SecureRandom; @@ -54,7 +55,7 @@ * String stronger_salt = BCrypt.gensalt(12)
    * </code>
    * <p>
    - * The amount of work increases exponentially (2**log_rounds), so + * The amount of work increases exponentially (2**log_rounds), so * each increment is twice as much work. The default log_rounds is * 10, and the valid range is 4 to 30. * @@ -689,7 +690,11 @@ public static String hashpw(SecureString password, String salt) { // the next lines are the SecureString replacement for the above commented-out section if (minor >= 'a') { - try (SecureString secureString = new SecureString(CharArrays.concat(password.getChars(), "\000".toCharArray()))) { + final char[] suffix = "\000".toCharArray(); + final char[] result = new char[password.length() + suffix.length]; + System.arraycopy(password.getChars(), 0, result, 0, password.length()); + System.arraycopy(suffix, 0, result, password.length(), suffix.length); + try (SecureString secureString = new SecureString(result)) { passwordb = CharArrays.toUtf8Bytes(secureString.getChars()); } } else { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CharArrays.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CharArrays.java deleted file mode 100644 index 26df90c31a2de..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CharArrays.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core.security.authc.support; - -import java.nio.ByteBuffer; -import java.nio.CharBuffer; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; - -/** - * Helper class similar to Arrays to handle conversions for Char arrays - */ -public class CharArrays { - - public static char[] utf8BytesToChars(byte[] utf8Bytes) { - ByteBuffer byteBuffer = ByteBuffer.wrap(utf8Bytes); - CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer); - char[] chars = Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit()); - byteBuffer.clear(); - charBuffer.clear(); - return chars; - } - - /** - * Like String.indexOf for for an array of chars - */ - static int indexOf(char[] array, char ch) { - for (int i = 0; (i < array.length); i++) { - if (array[i] == ch) { - return i; - } - } - return -1; - } - - /** - * Converts the provided char[] to a UTF-8 byte[]. The provided char[] is not modified by this - * method, so the caller needs to take care of clearing the value if it is sensitive. 
- */ - public static byte[] toUtf8Bytes(char[] chars) { - CharBuffer charBuffer = CharBuffer.wrap(chars); - ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(charBuffer); - byte[] bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit()); - Arrays.fill(byteBuffer.array(), (byte) 0); // clear sensitive data - return bytes; - } - - public static boolean charsBeginsWith(String prefix, char[] chars) { - if (chars == null || prefix == null) { - return false; - } - - if (prefix.length() > chars.length) { - return false; - } - - for (int i = 0; i < prefix.length(); i++) { - if (chars[i] != prefix.charAt(i)) { - return false; - } - } - - return true; - } - - public static boolean constantTimeEquals(char[] a, char[] b) { - if (a.length != b.length) { - return false; - } - - int equals = 0; - for (int i = 0; i < a.length; i++) { - equals |= a[i] ^ b[i]; - } - - return equals == 0; - } - - public static boolean constantTimeEquals(String a, String b) { - if (a.length() != b.length()) { - return false; - } - - int equals = 0; - for (int i = 0; i < a.length(); i++) { - equals |= a.charAt(i) ^ b.charAt(i); - } - - return equals == 0; - } - - public static char[] concat(char[] a, char[] b) { - final char[] result = new char[a.length + b.length]; - System.arraycopy(a, 0, result, 0, a.length); - System.arraycopy(b, 0, result, a.length, b.length); - return result; - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DelegatedAuthorizationSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DelegatedAuthorizationSettings.java new file mode 100644 index 0000000000000..b8384a76b41ad --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DelegatedAuthorizationSettings.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.security.authc.support; + +import org.elasticsearch.common.settings.Setting; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.function.Function; + +/** + * Settings related to "Delegated Authorization" (aka Lookup Realms) + */ +public class DelegatedAuthorizationSettings { + + public static final Setting> AUTHZ_REALMS = Setting.listSetting("authorization_realms", + Collections.emptyList(), Function.identity(), Setting.Property.NodeScope); + + public static Collection> getSettings() { + return Collections.singleton(AUTHZ_REALMS); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java index d12547bd90645..492622b2c519c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.authc.support; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.settings.SecureString; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UsernamePasswordToken.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UsernamePasswordToken.java index d8e58c29d237b..1349303600884 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UsernamePasswordToken.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UsernamePasswordToken.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.security.authc.support; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -107,7 +108,7 @@ private static UsernamePasswordToken extractToken(String headerValue) { throw authenticationError("invalid basic authentication header encoding", e); } - int i = CharArrays.indexOf(userpasswd, ':'); + int i = indexOfColon(userpasswd); if (i < 0) { throw authenticationError("invalid basic authentication header value"); } @@ -121,4 +122,15 @@ public static void putTokenHeader(ThreadContext context, UsernamePasswordToken t context.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue(token.username, token.password)); } + /** + * Like String.indexOf for for an array of chars + */ + private static int indexOfColon(char[] array) { + for (int i = 0; (i < array.length); i++) { + if (array[i] == ':') { + return i; + } + } + return -1; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java index 54fd8cc7974b2..69712a6f33de7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java @@ -243,16 +243,11 @@ public static RoleDescriptor readFrom(StreamInput in) throws IOException { String[] runAs = in.readStringArray(); Map metadata = 
in.readMap(); - final Map transientMetadata; - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - transientMetadata = in.readMap(); - } else { - transientMetadata = Collections.emptyMap(); - } + final Map transientMetadata = in.readMap(); final ApplicationResourcePrivileges[] applicationPrivileges; final ConditionalClusterPrivilege[] conditionalClusterPrivileges; - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { applicationPrivileges = in.readArray(ApplicationResourcePrivileges::createFrom, ApplicationResourcePrivileges[]::new); conditionalClusterPrivileges = ConditionalClusterPrivileges.readArray(in); } else { @@ -273,10 +268,8 @@ public static void writeTo(RoleDescriptor descriptor, StreamOutput out) throws I } out.writeStringArray(descriptor.runAs); out.writeMap(descriptor.metadata); - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeMap(descriptor.transientMetadata); - } - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeMap(descriptor.transientMetadata); + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeArray(ApplicationResourcePrivileges::write, descriptor.applicationPrivileges); ConditionalClusterPrivileges.writeArray(out, descriptor.getConditionalClusterPrivileges()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java index e812f0cfc7332..9426b64364783 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java @@ -63,7 +63,7 @@ import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader; import org.elasticsearch.xpack.core.security.support.Exceptions; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; import java.util.ArrayList; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 0c59343636553..22cb1c357c661 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -112,6 +112,8 @@ private static Map initializeReservedRoles() { null, MetadataUtils.DEFAULT_RESERVED_METADATA)) .put(UsernamesField.BEATS_ROLE, new RoleDescriptor(UsernamesField.BEATS_ROLE, new String[] { "monitor", MonitoringBulkAction.NAME}, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put(UsernamesField.APM_ROLE, new RoleDescriptor(UsernamesField.APM_ROLE, + new String[] { "monitor", MonitoringBulkAction.NAME}, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) .put("machine_learning_user", new RoleDescriptor("machine_learning_user", new String[] { "monitor_ml" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".ml-anomalies*", 
".ml-notifications").privileges("view_index_metadata", "read").build() }, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java index e1d3a2db8e952..d3cc60194f2cf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java @@ -292,12 +292,6 @@ public GetPrivilegesRequestBuilder prepareGetPrivileges(String applicationName, return new GetPrivilegesRequestBuilder(client, GetPrivilegesAction.INSTANCE).application(applicationName).privileges(privileges); } - public PutPrivilegesRequestBuilder preparePutPrivilege(String applicationName, String privilegeName, - BytesReference bytesReference, XContentType xContentType) throws IOException { - return new PutPrivilegesRequestBuilder(client, PutPrivilegesAction.INSTANCE) - .source(applicationName, privilegeName, bytesReference, xContentType); - } - public PutPrivilegesRequestBuilder preparePutPrivileges(BytesReference bytesReference, XContentType xContentType) throws IOException { return new PutPrivilegesRequestBuilder(client, PutPrivilegesAction.INSTANCE).source(bytesReference, xContentType); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/APMSystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/APMSystemUser.java new file mode 100644 index 0000000000000..c26b66875e6e2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/APMSystemUser.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.user; + +import org.elasticsearch.Version; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + +/** + * Built in user for APM server internals. Currently used for APM server monitoring. 
+ */ +public class APMSystemUser extends User { + + public static final String NAME = UsernamesField.APM_NAME; + public static final String ROLE_NAME = UsernamesField.APM_ROLE; + public static final Version DEFINED_SINCE = Version.V_6_5_0; + public static final BuiltinUserInfo USER_INFO = new BuiltinUserInfo(NAME, ROLE_NAME, DEFINED_SINCE); + + public APMSystemUser(boolean enabled) { + super(NAME, new String[]{ ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java index ae6f41c3a1bd4..36354ff58b318 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java @@ -9,7 +9,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.support.MetadataUtils; import java.util.Collections; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java index 9db64da97a539..dfa437fa8d2c4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.security.user; import org.elasticsearch.Version; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.support.MetadataUtils; /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/ElasticUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/ElasticUser.java index c58f86ea42245..ec618a4f4821c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/ElasticUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/ElasticUser.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.support.MetadataUtils; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUserSerializationHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUserSerializationHelper.java index c0b45aea57c20..fa41828a7bba8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUserSerializationHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUserSerializationHelper.java @@ -7,7 +7,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.protocol.xpack.security.User; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/KibanaUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/KibanaUser.java index 
3e816aa54bca5..8dfa149987d0e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/KibanaUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/KibanaUser.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.support.MetadataUtils; /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java index 047758177fb0b..88381482ef3fd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.Version; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.support.MetadataUtils; /** @@ -16,8 +14,6 @@ public class LogstashSystemUser extends User { public static final String NAME = UsernamesField.LOGSTASH_NAME; public static final String ROLE_NAME = UsernamesField.LOGSTASH_ROLE; - public static final Version DEFINED_SINCE = Version.V_5_2_0; - public static final BuiltinUserInfo USER_INFO = new BuiltinUserInfo(NAME, ROLE_NAME, DEFINED_SINCE); public LogstashSystemUser(boolean enabled) { super(NAME, new String[]{ ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java index 1c7ac129d17b8..4569c2a68a09b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.authz.privilege.SystemPrivilege; import java.util.function.Predicate; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/User.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/User.java similarity index 85% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/User.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/User.java index 42e957ecf2d51..028b14f882aab 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/User.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/User.java @@ -1,25 +1,10 @@ /* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. */ +package org.elasticsearch.xpack.core.security.user; -package org.elasticsearch.protocol.xpack.security; - -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -199,12 +184,7 @@ public static User partialReadFrom(String username, StreamInput input) throws IO boolean hasInnerUser = input.readBoolean(); if (hasInnerUser) { User innerUser = readFrom(input); - if (input.getVersion().onOrBefore(Version.V_5_4_0)) { - // backcompat: runas user was read first, so reverse outer and inner - return new User(innerUser, outerUser); - } else { - return new User(outerUser, innerUser); - } + return new User(outerUser, innerUser); } else { return outerUser; } @@ -221,11 +201,6 @@ public static void writeTo(User user, StreamOutput output) throws IOException { if (user.authenticatedUser == null) { // no backcompat necessary, since there is no inner user writeUser(user, output); - } else if (output.getVersion().onOrBefore(Version.V_5_4_0)) { - // backcompat: write runas user as the "inner" user - writeUser(user.authenticatedUser, output); - output.writeBoolean(true); - writeUser(user, output); } else { writeUser(user, output); output.writeBoolean(true); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java index 3b691b927b4a3..bd886567ed1b2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java @@ -20,6 +20,8 @@ public final class UsernamesField { public static final String LOGSTASH_ROLE = "logstash_system"; public static final String BEATS_NAME = "beats_system"; public static final String BEATS_ROLE = "beats_system"; + public static final String APM_NAME = "apm_system"; + public static final String APM_ROLE = "apm_system"; private UsernamesField() {} } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackSecurityUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackSecurityUser.java index e98df7fb50a56..906d354837783 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackSecurityUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackSecurityUser.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.protocol.xpack.security.User; - /** * internal user that manages xpack security. Has all cluster/indices permissions. 
*/ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackUser.java index fe50b1b9c88bc..38c9fe84aa934 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackUser.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java index d959c017e0a35..421b30baac7b6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.ssl; import org.elasticsearch.common.hash.MessageDigests; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import java.io.BufferedReader; import java.io.IOException; @@ -58,6 +58,7 @@ public class PemUtils { private static final String OPENSSL_EC_FOOTER = "-----END EC PRIVATE KEY-----"; private static final String OPENSSL_EC_PARAMS_HEADER = "-----BEGIN EC PARAMETERS-----"; private static final String OPENSSL_EC_PARAMS_FOOTER = "-----END EC PARAMETERS-----"; + private static final String HEADER = "-----BEGIN"; private PemUtils() { throw new IllegalStateException("Utility class should not be instantiated"); @@ -74,6 +75,9 @@ private PemUtils() { public static PrivateKey readPrivateKey(Path keyPath, Supplier passwordSupplier) { try (BufferedReader bReader = Files.newBufferedReader(keyPath, StandardCharsets.UTF_8)) { String line = bReader.readLine(); + while (null != line && line.startsWith(HEADER) == false){ + line = bReader.readLine(); + } if (null == line) { throw new IllegalStateException("Error parsing Private Key from: " + keyPath.toString() + ". 
File is empty"); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java index b1f3a32769ec9..a25e79ffdf66f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.watcher.WatcherField; import org.elasticsearch.xpack.core.security.SecurityField; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; diff --git a/x-pack/plugin/core/src/main/resources/security-index-template.json b/x-pack/plugin/core/src/main/resources/security-index-template.json index dd17baf04740f..bac5930c0d5c9 100644 --- a/x-pack/plugin/core/src/main/resources/security-index-template.json +++ b/x-pack/plugin/core/src/main/resources/security-index-template.json @@ -4,7 +4,7 @@ "settings" : { "number_of_shards" : 1, "number_of_replicas" : 0, - "auto_expand_replicas" : "0-all", + "auto_expand_replicas" : "0-1", "index.priority": 1000, "index.format": 6, "analysis" : { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java new file mode 100644 index 0000000000000..58ca42c7f681e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java @@ -0,0 +1,414 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.protocol.xpack.XPackInfoResponse; +import org.elasticsearch.protocol.xpack.license.LicenseStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.action.XPackInfoAction; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.argThat; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public final class RemoteClusterLicenseCheckerTests extends ESTestCase { + + public void testIsNotRemoteIndex() { + assertFalse(RemoteClusterLicenseChecker.isRemoteIndex("local-index")); + } + + public void testIsRemoteIndex() { + assertTrue(RemoteClusterLicenseChecker.isRemoteIndex("remote-cluster:remote-index")); + } + + public void testNoRemoteIndex() { + final List indices = Arrays.asList("local-index1", "local-index2"); + assertFalse(RemoteClusterLicenseChecker.containsRemoteIndex(indices)); + } + + public void testRemoteIndex() { + final List indices = Arrays.asList("local-index", "remote-cluster:remote-index"); + assertTrue(RemoteClusterLicenseChecker.containsRemoteIndex(indices)); + } + + public void testNoRemoteIndices() { + final List indices = Collections.singletonList("local-index"); + assertThat(RemoteClusterLicenseChecker.remoteIndices(indices), is(empty())); + } + + public void testRemoteIndices() { + final List indices = Arrays.asList("local-index1", "remote-cluster1:index1", "local-index2", "remote-cluster2:index1"); + assertThat( + RemoteClusterLicenseChecker.remoteIndices(indices), + containsInAnyOrder("remote-cluster1:index1", "remote-cluster2:index1")); + } + + public void testNoRemoteClusterAliases() { + final List indices = Arrays.asList("local-index1", "local-index2"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), empty()); + } + + public void testOneRemoteClusterAlias() { + final List indices = Arrays.asList("local-index1", "remote-cluster1:remote-index1"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), contains("remote-cluster1")); + } + + public void testMoreThanOneRemoteClusterAlias() { + final List indices = Arrays.asList("remote-cluster1:remote-index1", "local-index1", 
"remote-cluster2:remote-index1"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), contains("remote-cluster1", "remote-cluster2")); + } + + public void testDuplicateRemoteClusterAlias() { + final List indices = Arrays.asList( + "remote-cluster1:remote-index1", "local-index1", "remote-cluster2:index1", "remote-cluster2:remote-index2"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), contains("remote-cluster1", "remote-cluster2")); + } + + public void testCheckRemoteClusterLicensesGivenCompatibleLicenses() { + final AtomicInteger index = new AtomicInteger(); + final List responses = new ArrayList<>(); + + final ThreadPool threadPool = createMockThreadPool(); + final Client client = createMockClient(threadPool); + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener listener = + (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + final List remoteClusterAliases = Arrays.asList("valid1", "valid2", "valid3"); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); + final AtomicReference licenseCheck = new AtomicReference<>(); + + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + licenseCheck.set(response); + } + + @Override + public void onFailure(final Exception e) { + fail(e.getMessage()); + } + + })); + + verify(client, times(3)).execute(same(XPackInfoAction.INSTANCE), any(), any()); + assertNotNull(licenseCheck.get()); + assertTrue(licenseCheck.get().isSuccess()); + } + + public void testCheckRemoteClusterLicensesGivenIncompatibleLicense() { + final AtomicInteger index = new AtomicInteger(); + final List remoteClusterAliases = Arrays.asList("good", "cluster-with-basic-license", "good2"); + final List responses = new ArrayList<>(); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createBasicLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final ThreadPool threadPool = createMockThreadPool(); + final Client client = createMockClient(threadPool); + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener listener = + (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); + final AtomicReference licenseCheck = new AtomicReference<>(); + + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + licenseCheck.set(response); + } + + 
@Override + public void onFailure(final Exception e) { + fail(e.getMessage()); + } + + })); + + verify(client, times(2)).execute(same(XPackInfoAction.INSTANCE), any(), any()); + assertNotNull(licenseCheck.get()); + assertFalse(licenseCheck.get().isSuccess()); + assertThat(licenseCheck.get().remoteClusterLicenseInfo().clusterAlias(), equalTo("cluster-with-basic-license")); + assertThat(licenseCheck.get().remoteClusterLicenseInfo().licenseInfo().getType(), equalTo("BASIC")); + } + + public void testCheckRemoteClusterLicencesGivenNonExistentCluster() { + final AtomicInteger index = new AtomicInteger(); + final List responses = new ArrayList<>(); + + final List remoteClusterAliases = Arrays.asList("valid1", "valid2", "valid3"); + final String failingClusterAlias = randomFrom(remoteClusterAliases); + final ThreadPool threadPool = createMockThreadPool(); + final Client client = createMockClientThatThrowsOnGetRemoteClusterClient(threadPool, failingClusterAlias); + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener listener = + (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); + final AtomicReference exception = new AtomicReference<>(); + + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + fail(); + } + + @Override + public void onFailure(final Exception e) { + exception.set(e); + } + + })); + + assertNotNull(exception.get()); + assertThat(exception.get(), instanceOf(ElasticsearchException.class)); + assertThat(exception.get().getMessage(), equalTo("could not determine the license type for cluster [" + failingClusterAlias + "]")); + assertNotNull(exception.get().getCause()); + assertThat(exception.get().getCause(), instanceOf(IllegalArgumentException.class)); + } + + public void testRemoteClusterLicenseCallUsesSystemContext() throws InterruptedException { + final ThreadPool threadPool = new TestThreadPool(getTestName()); + + try { + final Client client = createMockClient(threadPool); + doAnswer(invocationMock -> { + assertTrue(threadPool.getThreadContext().isSystemContext()); + @SuppressWarnings("unchecked") ActionListener listener = + (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); + + final List remoteClusterAliases = Collections.singletonList("valid"); + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, doubleInvocationProtectingListener(ActionListener.wrap(() -> {}))); + + verify(client, times(1)).execute(same(XPackInfoAction.INSTANCE), any(), any()); + } finally { + terminate(threadPool); + } + } + + 
public void testListenerIsExecutedWithCallingContext() throws InterruptedException { + final AtomicInteger index = new AtomicInteger(); + final List responses = new ArrayList<>(); + + final ThreadPool threadPool = new TestThreadPool(getTestName()); + + try { + final List remoteClusterAliases = Arrays.asList("valid1", "valid2", "valid3"); + final Client client; + final boolean failure = randomBoolean(); + if (failure) { + client = createMockClientThatThrowsOnGetRemoteClusterClient(threadPool, randomFrom(remoteClusterAliases)); + } else { + client = createMockClient(threadPool); + } + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener listener = + (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); + + final AtomicBoolean listenerInvoked = new AtomicBoolean(); + threadPool.getThreadContext().putHeader("key", "value"); + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + if (failure) { + fail(); + } + assertThat(threadPool.getThreadContext().getHeader("key"), equalTo("value")); + assertFalse(threadPool.getThreadContext().isSystemContext()); + listenerInvoked.set(true); + } + + @Override + public void onFailure(final Exception e) { + if (failure == false) { + fail(); + } + assertThat(threadPool.getThreadContext().getHeader("key"), equalTo("value")); + assertFalse(threadPool.getThreadContext().isSystemContext()); + listenerInvoked.set(true); + } + + })); + + assertTrue(listenerInvoked.get()); + } finally { + terminate(threadPool); + } + } + + public void testBuildErrorMessageForActiveCompatibleLicense() { + final XPackInfoResponse.LicenseInfo platinumLicence = createPlatinumLicenseResponse(); + final RemoteClusterLicenseChecker.RemoteClusterLicenseInfo info = + new RemoteClusterLicenseChecker.RemoteClusterLicenseInfo("platinum-cluster", platinumLicence); + final AssertionError e = expectThrows( + AssertionError.class, + () -> RemoteClusterLicenseChecker.buildErrorMessage("", info, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial)); + assertThat(e, hasToString(containsString("license must be incompatible to build error message"))); + } + + public void testBuildErrorMessageForIncompatibleLicense() { + final XPackInfoResponse.LicenseInfo basicLicense = createBasicLicenseResponse(); + final RemoteClusterLicenseChecker.RemoteClusterLicenseInfo info = + new RemoteClusterLicenseChecker.RemoteClusterLicenseInfo("basic-cluster", basicLicense); + assertThat( + RemoteClusterLicenseChecker.buildErrorMessage("Feature", info, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial), + equalTo("the license mode [BASIC] on cluster [basic-cluster] does not enable [Feature]")); + } + + public void testBuildErrorMessageForInactiveLicense() { + final XPackInfoResponse.LicenseInfo expiredLicense = createExpiredLicenseResponse(); + final 
RemoteClusterLicenseChecker.RemoteClusterLicenseInfo info = + new RemoteClusterLicenseChecker.RemoteClusterLicenseInfo("expired-cluster", expiredLicense); + assertThat( + RemoteClusterLicenseChecker.buildErrorMessage("Feature", info, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial), + equalTo("the license on cluster [expired-cluster] is not active")); + } + + private ActionListener doubleInvocationProtectingListener( + final ActionListener listener) { + final AtomicBoolean listenerInvoked = new AtomicBoolean(); + return new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + if (listenerInvoked.compareAndSet(false, true) == false) { + fail("listener invoked twice"); + } + listener.onResponse(response); + } + + @Override + public void onFailure(final Exception e) { + if (listenerInvoked.compareAndSet(false, true) == false) { + fail("listener invoked twice"); + } + listener.onFailure(e); + } + + }; + } + + private ThreadPool createMockThreadPool() { + final ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + return threadPool; + } + + private Client createMockClient(final ThreadPool threadPool) { + return createMockClient(threadPool, client -> when(client.getRemoteClusterClient(anyString())).thenReturn(client)); + } + + private Client createMockClientThatThrowsOnGetRemoteClusterClient(final ThreadPool threadPool, final String clusterAlias) { + return createMockClient( + threadPool, + client -> { + when(client.getRemoteClusterClient(clusterAlias)).thenThrow(new IllegalArgumentException()); + when(client.getRemoteClusterClient(argThat(not(clusterAlias)))).thenReturn(client); + }); + } + + private Client createMockClient(final ThreadPool threadPool, final Consumer finish) { + final Client client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + finish.accept(client); + return client; + } + + private XPackInfoResponse.LicenseInfo createPlatinumLicenseResponse() { + return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.ACTIVE, randomNonNegativeLong()); + } + + private XPackInfoResponse.LicenseInfo createBasicLicenseResponse() { + return new XPackInfoResponse.LicenseInfo("uid", "BASIC", "BASIC", LicenseStatus.ACTIVE, randomNonNegativeLong()); + } + + private XPackInfoResponse.LicenseInfo createExpiredLicenseResponse() { + return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.EXPIRED, randomNonNegativeLong()); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index bb21ddbd1a13e..c2cb5af130538 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -229,7 +229,7 @@ public void testNewTrialDefaultsSecurityOff() { public void testOldTrialDefaultsSecurityOn() { XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); - licenseState.update(TRIAL, true, rarely() ? null : VersionUtils.randomVersionBetween(random(), Version.V_5_6_0, Version.V_6_2_4)); + licenseState.update(TRIAL, true, rarely() ? 
null : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_2_4)); assertThat(licenseState.isSecurityEnabled(), is(true)); assertThat(licenseState.isAuthAllowed(), is(true)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java new file mode 100644 index 0000000000000..fac99959c536a --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.XPackInfoResponse.BuildInfo; +import org.elasticsearch.protocol.xpack.XPackInfoResponse.LicenseInfo; +import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo; +import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo.FeatureSet; +import org.elasticsearch.protocol.xpack.license.LicenseStatus; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Predicate; +import java.io.IOException; + +public class XPackInfoResponseTests extends AbstractStreamableXContentTestCase { + @Override + protected XPackInfoResponse doParseInstance(XContentParser parser) throws IOException { + return XPackInfoResponse.fromXContent(parser); + } + + @Override + protected XPackInfoResponse createBlankInstance() { + return new XPackInfoResponse(); + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return path -> path.equals("features") + || (path.startsWith("features") && path.endsWith("native_code_info")); + } + + @Override + protected ToXContent.Params getToXContentParams() { + Map params = new HashMap<>(); + if (randomBoolean()) { + params.put("human", randomBoolean() ? "true" : "false"); + } + if (randomBoolean()) { + params.put("categories", "_none"); + } + return new ToXContent.MapParams(params); + } + + @Override + protected XPackInfoResponse createTestInstance() { + return new XPackInfoResponse( + randomBoolean() ? null : randomBuildInfo(), + randomBoolean() ? null : randomLicenseInfo(), + randomBoolean() ? 
null : randomFeatureSetsInfo()); + } + + @Override + protected XPackInfoResponse mutateInstance(XPackInfoResponse response) { + @SuppressWarnings("unchecked") + Function mutator = randomFrom( + r -> new XPackInfoResponse( + mutateBuildInfo(r.getBuildInfo()), + r.getLicenseInfo(), + r.getFeatureSetsInfo()), + r -> new XPackInfoResponse( + r.getBuildInfo(), + mutateLicenseInfo(r.getLicenseInfo()), + r.getFeatureSetsInfo()), + r -> new XPackInfoResponse( + r.getBuildInfo(), + r.getLicenseInfo(), + mutateFeatureSetsInfo(r.getFeatureSetsInfo()))); + return mutator.apply(response); + } + + private BuildInfo randomBuildInfo() { + return new BuildInfo( + randomAlphaOfLength(10), + randomAlphaOfLength(15)); + } + + private BuildInfo mutateBuildInfo(BuildInfo buildInfo) { + if (buildInfo == null) { + return randomBuildInfo(); + } + return null; + } + + private LicenseInfo randomLicenseInfo() { + return new LicenseInfo( + randomAlphaOfLength(10), + randomAlphaOfLength(4), + randomAlphaOfLength(5), + randomFrom(LicenseStatus.values()), + randomLong()); + } + + private LicenseInfo mutateLicenseInfo(LicenseInfo licenseInfo) { + if (licenseInfo == null) { + return randomLicenseInfo(); + } + return null; + } + + private FeatureSetsInfo randomFeatureSetsInfo() { + int size = between(0, 10); + Set featureSets = new HashSet<>(size); + while (featureSets.size() < size) { + featureSets.add(randomFeatureSet()); + } + return new FeatureSetsInfo(featureSets); + } + + private FeatureSetsInfo mutateFeatureSetsInfo(FeatureSetsInfo featureSetsInfo) { + if (featureSetsInfo == null) { + return randomFeatureSetsInfo(); + } + return null; + } + + private FeatureSet randomFeatureSet() { + return new FeatureSet( + randomAlphaOfLength(5), + randomBoolean() ? null : randomAlphaOfLength(20), + randomBoolean(), + randomBoolean(), + randomNativeCodeInfo()); + } + + private Map randomNativeCodeInfo() { + if (randomBoolean()) { + return null; + } + int size = between(0, 10); + Map nativeCodeInfo = new HashMap<>(size); + while (nativeCodeInfo.size() < size) { + nativeCodeInfo.put(randomAlphaOfLength(5), randomAlphaOfLength(5)); + } + return nativeCodeInfo; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/common/ProtocolUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/common/ProtocolUtilsTests.java new file mode 100644 index 0000000000000..c4e29d7c23005 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/common/ProtocolUtilsTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.common; + +import org.elasticsearch.test.ESTestCase; + +import java.util.HashMap; +import java.util.Map; + +public class ProtocolUtilsTests extends ESTestCase { + + public void testMapStringEqualsAndHash() { + assertTrue(ProtocolUtils.equals(null, null)); + assertFalse(ProtocolUtils.equals(null, new HashMap<>())); + assertFalse(ProtocolUtils.equals(new HashMap<>(), null)); + + Map a = new HashMap<>(); + a.put("foo", new String[] { "a", "b" }); + a.put("bar", new String[] { "b", "c" }); + + Map b = new HashMap<>(); + b.put("foo", new String[] { "a", "b" }); + + assertFalse(ProtocolUtils.equals(a, b)); + assertFalse(ProtocolUtils.equals(b, a)); + + b.put("bar", new String[] { "c", "b" }); + + assertFalse(ProtocolUtils.equals(a, b)); + assertFalse(ProtocolUtils.equals(b, a)); + + b.put("bar", new String[] { "b", "c" }); + + assertTrue(ProtocolUtils.equals(a, b)); + assertTrue(ProtocolUtils.equals(b, a)); + assertEquals(ProtocolUtils.hashCode(a), ProtocolUtils.hashCode(b)); + + b.put("baz", new String[] { "b", "c" }); + + assertFalse(ProtocolUtils.equals(a, b)); + assertFalse(ProtocolUtils.equals(b, a)); + + a.put("non", null); + + assertFalse(ProtocolUtils.equals(a, b)); + assertFalse(ProtocolUtils.equals(b, a)); + + b.put("non", null); + b.remove("baz"); + + assertTrue(ProtocolUtils.equals(a, b)); + assertTrue(ProtocolUtils.equals(b, a)); + assertEquals(ProtocolUtils.hashCode(a), ProtocolUtils.hashCode(b)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java new file mode 100644 index 0000000000000..9f0f414569540 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Predicate; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class GraphExploreResponseTests extends AbstractXContentTestCase< GraphExploreResponse> { + + @Override + protected GraphExploreResponse createTestInstance() { + return createInstance(0); + } + private static GraphExploreResponse createInstance(int numFailures) { + int numItems = randomIntBetween(4, 128); + boolean timedOut = randomBoolean(); + boolean showDetails = randomBoolean(); + long overallTookInMillis = randomNonNegativeLong(); + Map vertices = new HashMap<>(); + Map connections = new HashMap<>(); + ShardOperationFailedException [] failures = new ShardOperationFailedException [numFailures]; + for (int i = 0; i < failures.length; i++) { + failures[i] = new ShardSearchFailure(new ElasticsearchException("an error")); + } + + //Create random set of vertices + for (int i = 0; i < numItems; i++) { + Vertex v = new Vertex("field1", randomAlphaOfLength(5), randomDouble(), 0, + showDetails?randomIntBetween(100, 200):0, + showDetails?randomIntBetween(1, 100):0); + vertices.put(v.getId(), v); + } + + //Wire up half the vertices randomly + Vertex[] vs = vertices.values().toArray(new Vertex[vertices.size()]); + for (int i = 0; i < numItems/2; i++) { + Vertex v1 = vs[randomIntBetween(0, vs.length-1)]; + Vertex v2 = vs[randomIntBetween(0, vs.length-1)]; + if(v1 != v2) { + Connection conn = new Connection(v1, v2, randomDouble(), randomLongBetween(1, 10)); + connections.put(conn.getId(), conn); + } + } + return new GraphExploreResponse(overallTookInMillis, timedOut, failures, vertices, connections, showDetails); + } + + + private static GraphExploreResponse createTestInstanceWithFailures() { + return createInstance(randomIntBetween(1, 128)); + } + + @Override + protected GraphExploreResponse doParseInstance(XContentParser parser) throws IOException { + return GraphExploreResponse.fromXContext(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected String[] getShuffleFieldsExceptions() { + return new String[]{"vertices", "connections"}; + } + + protected Predicate getRandomFieldsExcludeFilterWhenResultHasErrors() { + return field -> field.startsWith("responses"); + } + + @Override + protected void assertEqualInstances( GraphExploreResponse expectedInstance, GraphExploreResponse newInstance) { + assertThat(newInstance.getTook(), equalTo(expectedInstance.getTook())); + assertThat(newInstance.isTimedOut(), equalTo(expectedInstance.isTimedOut())); + + Connection[] newConns = newInstance.getConnections().toArray(new Connection[0]); + Connection[] expectedConns = expectedInstance.getConnections().toArray(new Connection[0]); + assertArrayEquals(expectedConns, newConns); + + Vertex[] newVertices = newInstance.getVertices().toArray(new Vertex[0]); + Vertex[] expectedVertices = expectedInstance.getVertices().toArray(new Vertex[0]); + assertArrayEquals(expectedVertices, newVertices); + + ShardOperationFailedException[] newFailures = 
newInstance.getShardFailures(); + ShardOperationFailedException[] expectedFailures = expectedInstance.getShardFailures(); + assertEquals(expectedFailures.length, newFailures.length); + + } + + /** + * Test parsing {@link GraphExploreResponse} with inner failures as they don't support asserting on xcontent equivalence, given + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier< GraphExploreResponse> instanceSupplier = GraphExploreResponseTests::createTestInstanceWithFailures; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break. + boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent( + NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, getShuffleFieldsExceptions(), + getRandomFieldsExcludeFilterWhenResultHasErrors(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, ToXContent.EMPTY_PARAMS); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java new file mode 100644 index 0000000000000..7149477d00765 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import java.io.IOException; + +import org.elasticsearch.test.ESTestCase; + +public class LicenseStatusTests extends ESTestCase { + public void testSerialization() throws IOException { + LicenseStatus status = randomFrom(LicenseStatus.values()); + assertSame(status, copyWriteable(status, writableRegistry(), LicenseStatus::readFrom)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java new file mode 100644 index 0000000000000..a09fd6fb99b45 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Predicate; + +public class PutLicenseResponseTests extends AbstractStreamableXContentTestCase { + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // The structure of the response is such that unknown fields inside acknowledge cannot be supported since they + // are treated as messages from new services + return p -> p.startsWith("acknowledge"); + } + + @Override + protected PutLicenseResponse createTestInstance() { + boolean acknowledged = randomBoolean(); + LicensesStatus status = randomFrom(LicensesStatus.VALID, LicensesStatus.INVALID, LicensesStatus.EXPIRED); + String messageHeader; + Map ackMessages; + if (randomBoolean()) { + messageHeader = randomAlphaOfLength(10); + ackMessages = randomAckMessages(); + } else { + messageHeader = null; + ackMessages = Collections.emptyMap(); + } + + return new PutLicenseResponse(acknowledged, status, messageHeader, ackMessages); + } + + private static Map randomAckMessages() { + int nFeatures = randomIntBetween(1, 5); + + Map ackMessages = new HashMap<>(); + + for (int i = 0; i < nFeatures; i++) { + String feature = randomAlphaOfLengthBetween(9, 15); + int nMessages = randomIntBetween(1, 5); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = randomAlphaOfLengthBetween(10, 30); + } + ackMessages.put(feature, messages); + } + + return ackMessages; + } + + @Override + protected PutLicenseResponse doParseInstance(XContentParser parser) throws IOException { + return PutLicenseResponse.fromXContent(parser); + } + + @Override + protected PutLicenseResponse createBlankInstance() { + return new PutLicenseResponse(); + } + + @Override + protected PutLicenseResponse mutateInstance(PutLicenseResponse response) { + @SuppressWarnings("unchecked") + Function mutator = randomFrom( + r -> new PutLicenseResponse( + r.isAcknowledged() == false, + r.status(), + r.acknowledgeHeader(), + r.acknowledgeMessages()), + r -> new PutLicenseResponse( + r.isAcknowledged(), + mutateStatus(r.status()), + r.acknowledgeHeader(), + r.acknowledgeMessages()), + r -> { + if (r.acknowledgeMessages().isEmpty()) { + return new PutLicenseResponse( + r.isAcknowledged(), + r.status(), + randomAlphaOfLength(10), + randomAckMessages() + ); + } else { + return new PutLicenseResponse(r.isAcknowledged(), r.status()); + } + } + + ); + return mutator.apply(response); + } + + private LicensesStatus mutateStatus(LicensesStatus status) { + return randomValueOtherThan(status, () -> randomFrom(LicensesStatus.values())); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java new file mode 100644 index 0000000000000..0e09a05fb967a --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.migration; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +public class IndexUpgradeInfoRequestTests extends AbstractWireSerializingTestCase { + @Override + protected IndexUpgradeInfoRequest createTestInstance() { + int indexCount = randomInt(4); + String[] indices = new String[indexCount]; + for (int i = 0; i < indexCount; i++) { + indices[i] = randomAlphaOfLength(10); + } + IndexUpgradeInfoRequest request = new IndexUpgradeInfoRequest(indices); + if (randomBoolean()) { + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + } + return request; + } + + @Override + protected Writeable.Reader instanceReader() { + return IndexUpgradeInfoRequest::new; + } + + public void testNullIndices() { + expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest((String[])null)); + expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest().indices((String[])null)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java new file mode 100644 index 0000000000000..57f01a4454e02 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.migration; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; + +public class IndexUpgradeInfoResponseTests extends AbstractStreamableXContentTestCase { + @Override + protected IndexUpgradeInfoResponse doParseInstance(XContentParser parser) { + return IndexUpgradeInfoResponse.fromXContent(parser); + } + + @Override + protected IndexUpgradeInfoResponse createBlankInstance() { + return new IndexUpgradeInfoResponse(); + } + + @Override + protected IndexUpgradeInfoResponse createTestInstance() { + return randomIndexUpgradeInfoResponse(randomIntBetween(0, 10)); + } + + private static IndexUpgradeInfoResponse randomIndexUpgradeInfoResponse(int numIndices) { + Map actions = new HashMap<>(); + for (int i = 0; i < numIndices; i++) { + actions.put(randomAlphaOfLength(5), randomFrom(UpgradeActionRequired.values())); + } + return new IndexUpgradeInfoResponse(actions); + } + + @Override + protected IndexUpgradeInfoResponse mutateInstance(IndexUpgradeInfoResponse instance) { + if (instance.getActions().size() == 0) { + return randomIndexUpgradeInfoResponse(1); + } + Map actions = new HashMap<>(instance.getActions()); + if (randomBoolean()) { + Iterator> iterator = actions.entrySet().iterator(); + iterator.next(); + iterator.remove(); + } else { + actions.put(randomAlphaOfLength(5), randomFrom(UpgradeActionRequired.values())); + } + return new IndexUpgradeInfoResponse(actions); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java new file mode 100644 index 0000000000000..209bc790a8c54 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class DeleteWatchResponseTests extends AbstractXContentTestCase { + + @Override + protected DeleteWatchResponse createTestInstance() { + String id = randomAlphaOfLength(10); + long version = randomLongBetween(1, 10); + boolean found = randomBoolean(); + return new DeleteWatchResponse(id, version, found); + } + + @Override + protected DeleteWatchResponse doParseInstance(XContentParser parser) throws IOException { + return DeleteWatchResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java new file mode 100644 index 0000000000000..1fc2f61b684c7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class PutWatchResponseTests extends AbstractXContentTestCase { + + @Override + protected PutWatchResponse createTestInstance() { + String id = randomAlphaOfLength(10); + long version = randomLongBetween(1, 10); + boolean created = randomBoolean(); + return new PutWatchResponse(id, version, created); + } + + @Override + protected PutWatchResponse doParseInstance(XContentParser parser) throws IOException { + return PutWatchResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java new file mode 100644 index 0000000000000..2662e05570c6d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -0,0 +1,143 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexing; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchResponseSections; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; + +public class AsyncTwoPhaseIndexerTests extends ESTestCase { + + AtomicBoolean isFinished = new AtomicBoolean(false); + + private class MockIndexer extends AsyncTwoPhaseIndexer { + + // test the execution order + private int step; + + protected MockIndexer(Executor executor, AtomicReference initialState, Integer initialPosition) { + super(executor, initialState, initialPosition, new MockJobStats()); + } + + @Override + protected String getJobId() { + return "mock"; + } + + @Override + protected IterationResult doProcess(SearchResponse searchResponse) { + assertThat(step, equalTo(3)); + ++step; + return new IterationResult(Collections.emptyList(), 3, true); + } + + @Override + protected SearchRequest buildSearchRequest() { + assertThat(step, equalTo(1)); + ++step; + return null; + } + + @Override + protected void onStartJob(long now) { + assertThat(step, equalTo(0)); + ++step; + } + + @Override + protected void doNextSearch(SearchRequest request, ActionListener nextPhase) { + 
assertThat(step, equalTo(2)); + ++step; + final SearchResponseSections sections = new SearchResponseSections(new SearchHits(new SearchHit[0], 0, 0), null, null, false, + null, null, 1); + + nextPhase.onResponse(new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null)); + } + + @Override + protected void doNextBulk(BulkRequest request, ActionListener nextPhase) { + fail("should not be called"); + } + + @Override + protected void doSaveState(IndexerState state, Integer position, Runnable next) { + assertThat(step, equalTo(4)); + ++step; + next.run(); + } + + @Override + protected void onFailure(Exception exc) { + fail(exc.getMessage()); + } + + @Override + protected void onFinish() { + assertThat(step, equalTo(5)); + ++step; + isFinished.set(true); + } + + @Override + protected void onAbort() { + } + + public int getStep() { + return step; + } + + } + + private static class MockJobStats extends IndexerJobStats { + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return null; + } + } + + public void testStateMachine() throws InterruptedException { + AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); + final ExecutorService executor = Executors.newFixedThreadPool(1); + + try { + + MockIndexer indexer = new MockIndexer(executor, state, 2); + indexer.start(); + assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); + assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); + assertThat(indexer.getPosition(), equalTo(2)); + ESTestCase.awaitBusy(() -> isFinished.get()); + assertThat(indexer.getStep(), equalTo(6)); + assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); + assertThat(indexer.getStats().getNumPages(), equalTo(1L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); + assertTrue(indexer.abort()); + } finally { + executor.shutdownNow(); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/IndexerStateEnumTests.java similarity index 98% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/IndexerStateEnumTests.java index ec17a37e23b2b..329800c2f1a24 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/IndexerStateEnumTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.rollup.job; +package org.elasticsearch.xpack.core.indexing; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index ffc13655d229c..3030449abd1b6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -100,7 +100,7 @@ public static DatafeedConfig createRandomizedDatafeedConfig(String jobId, long b if (aggHistogramInterval == null) { builder.setFrequency(TimeValue.timeValueSeconds(randomIntBetween(1, 1_000_000))); } else { - builder.setFrequency(TimeValue.timeValueMillis(randomIntBetween(1, 5) * aggHistogramInterval)); + builder.setFrequency(TimeValue.timeValueSeconds(randomIntBetween(1, 5) * aggHistogramInterval)); } } if (randomBoolean()) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java index 88d9b07816d44..7e53478533eb3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java @@ -39,7 +39,6 @@ import java.util.Map; import java.util.Set; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -479,19 +478,6 @@ public void testBuilder_givenTimeFieldInAnalysisConfig() { assertThat(e.getMessage(), equalTo(Messages.getMessage(Messages.JOB_CONFIG_TIME_FIELD_NOT_ALLOWED_IN_ANALYSIS_CONFIG))); } - public void testGetCompatibleJobTypes_givenVersionBefore_V_5_4() { - assertThat(Job.getCompatibleJobTypes(Version.V_5_0_0).isEmpty(), is(true)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_3_0).isEmpty(), is(true)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_3_2).isEmpty(), is(true)); - } - - public void testGetCompatibleJobTypes_givenVersionAfter_V_5_4() { - assertThat(Job.getCompatibleJobTypes(Version.V_5_4_0), contains(Job.ANOMALY_DETECTOR_JOB_TYPE)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_4_0).size(), equalTo(1)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_5_0), contains(Job.ANOMALY_DETECTOR_JOB_TYPE)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_5_0).size(), equalTo(1)); - } - public void testInvalidCreateTimeSettings() { Job.Builder builder = new Job.Builder("invalid-settings"); builder.setModelSnapshotId("snapshot-foo"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java index c1f25bead224e..9aedf61859d32 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java @@ -274,6 +274,8 @@ public void testIsAutodetectProcessUpdate() { assertTrue(update.isAutodetectProcessUpdate()); update = new 
JobUpdate.Builder("foo").setDetectorUpdates(Collections.singletonList(mock(JobUpdate.DetectorUpdate.class))).build(); assertTrue(update.isAutodetectProcessUpdate()); + update = new JobUpdate.Builder("foo").setGroups(Arrays.asList("bar")).build(); + assertTrue(update.isAutodetectProcessUpdate()); } public void testUpdateAnalysisLimitWithValueGreaterThanMax() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java index a0df63bc38dde..1ab6e6a55d495 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.action.GetRollupJobsAction; @@ -40,7 +41,8 @@ protected GetRollupJobsAction.JobWrapper createTestInstance() { } return new GetRollupJobsAction.JobWrapper(ConfigTestHelpers.randomRollupJobConfig(random()), - new RollupJobStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()), + new RollupIndexerJobStats(randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong()), new RollupJobStatus(state, Collections.emptyMap(), randomBoolean())); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStatsTests.java new file mode 100644 index 0000000000000..81f31e2e5c4eb --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStatsTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +public class RollupIndexerJobStatsTests extends AbstractSerializingTestCase { + + @Override + protected RollupIndexerJobStats createTestInstance() { + return randomStats(); + } + + @Override + protected Writeable.Reader instanceReader() { + return RollupIndexerJobStats::new; + } + + @Override + protected RollupIndexerJobStats doParseInstance(XContentParser parser) { + return RollupIndexerJobStats.fromXContent(parser); + } + + public static RollupIndexerJobStats randomStats() { + return new RollupIndexerJobStats(randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong()); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java deleted file mode 100644 index 0091b21dc40d0..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core.rollup.job; - -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractSerializingTestCase; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; - -public class RollupJobStatsTests extends AbstractSerializingTestCase { - - @Override - protected RollupJobStats createTestInstance() { - return randomStats(); - } - - @Override - protected Writeable.Reader instanceReader() { - return RollupJobStats::new; - } - - @Override - protected RollupJobStats doParseInstance(XContentParser parser) { - return RollupJobStats.fromXContent(parser); - } - - public static RollupJobStats randomStats() { - return new RollupJobStats(randomNonNegativeLong(), randomNonNegativeLong(), - randomNonNegativeLong(), randomNonNegativeLong()); - } -} - diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java index 2c802a7e41dc3..f46bda788bf5b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.indexing.IndexerState; import java.util.HashMap; import java.util.Map; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfigSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfigSerializingTests.java index ccdd616df7b51..b0e33579eb353 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfigSerializingTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfigSerializingTests.java @@ -9,19 +9,16 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomTermsGroupConfig; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class TermsGroupConfigSerializingTests extends AbstractSerializingTestCase { @@ -77,62 +74,4 @@ public void testValidateFieldWrongType() { assertThat(e.validationErrors().get(0), equalTo("The field referenced by a terms group must be a [numeric] or " + "[keyword/text] type, but found [geo_point] for field [my_field]")); } - - public void testValidateFieldMatchingNotAggregatable() { - ActionRequestValidationException e = new ActionRequestValidationException(); - Map> responseMap = new HashMap<>(); - - // Have to mock fieldcaps because the ctor's aren't public... - FieldCapabilities fieldCaps = mock(FieldCapabilities.class); - when(fieldCaps.isAggregatable()).thenReturn(false); - responseMap.put("my_field", Collections.singletonMap(getRandomType(), fieldCaps)); - - TermsGroupConfig config = new TermsGroupConfig("my_field"); - config.validateMappings(responseMap, e); - assertThat(e.validationErrors().get(0), equalTo("The field [my_field] must be aggregatable across all indices, but is not.")); - } - - public void testValidateMatchingField() { - ActionRequestValidationException e = new ActionRequestValidationException(); - Map> responseMap = new HashMap<>(); - String type = getRandomType(); - - // Have to mock fieldcaps because the ctor's aren't public... - FieldCapabilities fieldCaps = mock(FieldCapabilities.class); - when(fieldCaps.isAggregatable()).thenReturn(true); - responseMap.put("my_field", Collections.singletonMap(type, fieldCaps)); - - TermsGroupConfig config = new TermsGroupConfig("my_field"); - config.validateMappings(responseMap, e); - if (e.validationErrors().size() != 0) { - fail(e.getMessage()); - } - - List> builders = config.toBuilders(); - assertThat(builders.size(), equalTo(1)); - } - - private String getRandomType() { - int n = randomIntBetween(0,8); - if (n == 0) { - return "keyword"; - } else if (n == 1) { - return "text"; - } else if (n == 2) { - return "long"; - } else if (n == 3) { - return "integer"; - } else if (n == 4) { - return "short"; - } else if (n == 5) { - return "float"; - } else if (n == 6) { - return "double"; - } else if (n == 7) { - return "scaled_float"; - } else if (n == 8) { - return "half_float"; - } - return "long"; - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngineTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngineTests.java new file mode 100644 index 0000000000000..5ab7b805cc1f7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngineTests.java @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.scheduler; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.mockito.ArgumentCaptor; + +import java.time.Clock; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.any; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Matchers.argThat; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; + +public class SchedulerEngineTests extends ESTestCase { + + public void testListenersThrowingExceptionsDoNotCauseOtherListenersToBeSkipped() throws InterruptedException { + final Logger mockLogger = mock(Logger.class); + final SchedulerEngine engine = new SchedulerEngine(Settings.EMPTY, Clock.systemUTC(), mockLogger); + try { + final List> listeners = new ArrayList<>(); + final int numberOfListeners = randomIntBetween(1, 32); + int numberOfFailingListeners = 0; + final CountDownLatch latch = new CountDownLatch(numberOfListeners); + + for (int i = 0; i < numberOfListeners; i++) { + final AtomicBoolean trigger = new AtomicBoolean(); + final SchedulerEngine.Listener listener; + if (randomBoolean()) { + listener = event -> { + if (trigger.compareAndSet(false, true)) { + latch.countDown(); + } else { + fail("listener invoked twice"); + } + }; + } else { + numberOfFailingListeners++; + listener = event -> { + if (trigger.compareAndSet(false, true)) { + // we count down the latch after this exception is caught and mock logged in SchedulerEngine#notifyListeners + throw new RuntimeException(getTestName()); + } else { + fail("listener invoked twice"); + } + }; + doAnswer(invocationOnMock -> { + // this happens after the listener has been notified, threw an exception, and then mock logged the exception + latch.countDown(); + return null; + }).when(mockLogger).warn(argThat(any(ParameterizedMessage.class)), argThat(any(RuntimeException.class))); + } + listeners.add(Tuple.tuple(listener, trigger)); + } + + // randomize the order and register the listeners + Collections.shuffle(listeners, random()); + listeners.stream().map(Tuple::v1).forEach(engine::register); + + final AtomicBoolean scheduled = new AtomicBoolean(); + engine.add(new SchedulerEngine.Job( + getTestName(), + (startTime, now) -> { + // only allow one triggering of the listeners + if (scheduled.compareAndSet(false, true)) { + return 0; + } else { + return -1; + } + })); + + latch.await(); + + // now check that every listener was invoked + assertTrue(listeners.stream().map(Tuple::v2).allMatch(AtomicBoolean::get)); + if (numberOfFailingListeners > 0) { + assertFailedListenerLogMessage(mockLogger, numberOfFailingListeners); + } + verifyNoMoreInteractions(mockLogger); + } finally { + engine.stop(); + } + } + + public void 
testListenersThrowingExceptionsDoNotCauseNextScheduledTaskToBeSkipped() throws InterruptedException { + final Logger mockLogger = mock(Logger.class); + final SchedulerEngine engine = new SchedulerEngine(Settings.EMPTY, Clock.systemUTC(), mockLogger); + try { + final List> listeners = new ArrayList<>(); + final int numberOfListeners = randomIntBetween(1, 32); + final int numberOfSchedules = randomIntBetween(1, 32); + final CountDownLatch listenersLatch = new CountDownLatch(numberOfSchedules * numberOfListeners); + for (int i = 0; i < numberOfListeners; i++) { + final AtomicInteger triggerCount = new AtomicInteger(); + final SchedulerEngine.Listener listener = event -> { + if (triggerCount.incrementAndGet() <= numberOfSchedules) { + listenersLatch.countDown(); + throw new RuntimeException(getTestName()); + } else { + fail("listener invoked more than [" + numberOfSchedules + "] times"); + } + }; + listeners.add(Tuple.tuple(listener, triggerCount)); + engine.register(listener); + } + + // latch for each invocation of nextScheduledTimeAfter, once for each scheduled run, and then a final time when we disable + final CountDownLatch latch = new CountDownLatch(1 + numberOfSchedules); + engine.add(new SchedulerEngine.Job( + getTestName(), + (startTime, now) -> { + if (latch.getCount() >= 2) { + latch.countDown(); + return 0; + } else if (latch.getCount() == 1) { + latch.countDown(); + return -1; + } else { + throw new AssertionError("nextScheduledTimeAfter invoked more than the expected number of times"); + } + })); + + listenersLatch.await(); + assertTrue(listeners.stream().map(Tuple::v2).allMatch(count -> count.get() == numberOfSchedules)); + latch.await(); + assertFailedListenerLogMessage(mockLogger, numberOfSchedules * numberOfListeners); + verifyNoMoreInteractions(mockLogger); + } finally { + engine.stop(); + } + } + + private void assertFailedListenerLogMessage(Logger mockLogger, int times) { + final ArgumentCaptor messageCaptor = ArgumentCaptor.forClass(ParameterizedMessage.class); + final ArgumentCaptor throwableCaptor = ArgumentCaptor.forClass(Throwable.class); + verify(mockLogger, times(times)).warn(messageCaptor.capture(), throwableCaptor.capture()); + for (final ParameterizedMessage message : messageCaptor.getAllValues()) { + assertThat(message.getFormat(), equalTo("listener failed while handling triggered event [{}]")); + assertThat(message.getParameters(), arrayWithSize(1)); + assertThat(message.getParameters()[0], equalTo(getTestName())); + } + for (final Throwable throwable : throwableCaptor.getAllValues()) { + assertThat(throwable, instanceOf(RuntimeException.class)); + assertThat(throwable.getMessage(), equalTo(getTestName())); + } + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java index ae458cbb2f5ed..a68a522f0242c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java @@ -58,11 +58,17 @@ public void testSerialization() throws IOException { final PutRoleRequest original = buildRandomRequest(); final BytesStreamOutput out = new BytesStreamOutput(); + if (randomBoolean()) { + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT); + logger.info("Serializing with version {}", 
version); + out.setVersion(version); + } original.writeTo(out); final PutRoleRequest copy = new PutRoleRequest(); final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables()); StreamInput in = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(out.bytes())), registry); + in.setVersion(out.getVersion()); copy.readFrom(in); assertThat(copy.roleDescriptor(), equalTo(original.roleDescriptor())); @@ -72,7 +78,7 @@ public void testSerializationV63AndBefore() throws IOException { final PutRoleRequest original = buildRandomRequest(); final BytesStreamOutput out = new BytesStreamOutput(); - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_6_0, Version.V_6_3_2); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_3_2); out.setVersion(version); original.writeTo(out); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/CreateTokenRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestTests.java similarity index 78% rename from x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/CreateTokenRequestTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestTests.java index 4404526328448..bd23198e8eae7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/CreateTokenRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.security.action.token; +package org.elasticsearch.xpack.core.security.action.token; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.settings.SecureString; @@ -20,7 +20,7 @@ public void testRequestValidation() { ActionRequestValidationException ve = request.validate(); assertNotNull(ve); assertEquals(1, ve.validationErrors().size()); - assertThat(ve.validationErrors().get(0), containsString("[password, refresh_token]")); + assertThat(ve.validationErrors().get(0), containsString("[password, refresh_token, client_credentials]")); assertThat(ve.validationErrors().get(0), containsString("grant_type")); request.setGrantType("password"); @@ -72,5 +72,19 @@ public void testRequestValidation() { assertNotNull(ve); assertEquals(1, ve.validationErrors().size()); assertThat(ve.validationErrors(), hasItem("refresh_token is missing")); + + request.setGrantType("client_credentials"); + ve = request.validate(); + assertNull(ve); + + request.setUsername(randomAlphaOfLengthBetween(1, 32)); + request.setPassword(new SecureString(randomAlphaOfLengthBetween(1, 32).toCharArray())); + request.setRefreshToken(randomAlphaOfLengthBetween(1, 32)); + ve = request.validate(); + assertNotNull(ve); + assertEquals(3, ve.validationErrors().size()); + assertThat(ve.validationErrors(), hasItem(containsString("username is not supported"))); + assertThat(ve.validationErrors(), hasItem(containsString("password is not supported"))); + assertThat(ve.validationErrors(), hasItem(containsString("refresh_token is not supported"))); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java new file mode 100644 index 0000000000000..b784310fdb2a8 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.token; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; + +public class CreateTokenResponseTests extends ESTestCase { + + public void testSerialization() throws Exception { + CreateTokenResponse response = new CreateTokenResponse(randomAlphaOfLengthBetween(1, 10), TimeValue.timeValueMinutes(20L), + randomBoolean() ? null : "FULL", randomAlphaOfLengthBetween(1, 10)); + try (BytesStreamOutput output = new BytesStreamOutput()) { + response.writeTo(output); + try (StreamInput input = output.bytes().streamInput()) { + CreateTokenResponse serialized = new CreateTokenResponse(); + serialized.readFrom(input); + assertEquals(response, serialized); + } + } + + response = new CreateTokenResponse(randomAlphaOfLengthBetween(1, 10), TimeValue.timeValueMinutes(20L), + randomBoolean() ? 
null : "FULL", null); + try (BytesStreamOutput output = new BytesStreamOutput()) { + response.writeTo(output); + try (StreamInput input = output.bytes().streamInput()) { + CreateTokenResponse serialized = new CreateTokenResponse(); + serialized.readFrom(input); + assertEquals(response, serialized); + } + } + } + + public void testSerializationToPre62Version() throws Exception { + CreateTokenResponse response = new CreateTokenResponse(randomAlphaOfLengthBetween(1, 10), TimeValue.timeValueMinutes(20L), + randomBoolean() ? null : "FULL", randomBoolean() ? null : randomAlphaOfLengthBetween(1, 10)); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_1_4); + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(version); + response.writeTo(output); + try (StreamInput input = output.bytes().streamInput()) { + input.setVersion(version); + CreateTokenResponse serialized = new CreateTokenResponse(); + serialized.readFrom(input); + assertNull(serialized.getRefreshToken()); + assertEquals(response.getTokenString(), serialized.getTokenString()); + assertEquals(response.getExpiresIn(), serialized.getExpiresIn()); + assertEquals(response.getScope(), serialized.getScope()); + } + } + } + + public void testSerializationToPost62Pre65Version() throws Exception { + CreateTokenResponse response = new CreateTokenResponse(randomAlphaOfLengthBetween(1, 10), TimeValue.timeValueMinutes(20L), + randomBoolean() ? null : "FULL", randomAlphaOfLengthBetween(1, 10)); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_2_0, Version.V_6_4_0); + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(version); + response.writeTo(output); + try (StreamInput input = output.bytes().streamInput()) { + input.setVersion(version); + CreateTokenResponse serialized = new CreateTokenResponse(); + serialized.readFrom(input); + assertEquals(response, serialized); + } + } + + // no refresh token + response = new CreateTokenResponse(randomAlphaOfLengthBetween(1, 10), TimeValue.timeValueMinutes(20L), + randomBoolean() ? 
null : "FULL", null); + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(version); + response.writeTo(output); + try (StreamInput input = output.bytes().streamInput()) { + input.setVersion(version); + CreateTokenResponse serialized = new CreateTokenResponse(); + serialized.readFrom(input); + assertEquals("", serialized.getRefreshToken()); + assertEquals(response.getTokenString(), serialized.getTokenString()); + assertEquals(response.getExpiresIn(), serialized.getExpiresIn()); + assertEquals(response.getScope(), serialized.getScope()); + } + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java index f458311e68537..a6706542e9613 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; @@ -28,9 +29,10 @@ public class HasPrivilegesRequestTests extends ESTestCase { - public void testSerializationV7() throws IOException { + public void testSerializationV64OrLater() throws IOException { final HasPrivilegesRequest original = randomRequest(); - final HasPrivilegesRequest copy = serializeAndDeserialize(original, Version.V_7_0_0_alpha1); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT); + final HasPrivilegesRequest copy = serializeAndDeserialize(original, version); assertThat(copy.username(), equalTo(original.username())); assertThat(copy.clusterPrivileges(), equalTo(original.clusterPrivileges())); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponseTests.java new file mode 100644 index 0000000000000..89c58945badd0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponseTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class HasPrivilegesResponseTests extends ESTestCase { + + public void testSerializationV64OrLater() throws IOException { + final HasPrivilegesResponse original = randomResponse(); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT); + final HasPrivilegesResponse copy = serializeAndDeserialize(original, version); + + assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch())); +// assertThat(copy.getClusterPrivileges(), equalTo(original.getClusterPrivileges())); + assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges())); + assertThat(copy.getApplicationPrivileges(), equalTo(original.getApplicationPrivileges())); + } + + public void testSerializationV63() throws IOException { + final HasPrivilegesResponse original = randomResponse(); + final HasPrivilegesResponse copy = serializeAndDeserialize(original, Version.V_6_3_0); + + assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch())); +// assertThat(copy.getClusterPrivileges(), equalTo(original.getClusterPrivileges())); + assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges())); + assertThat(copy.getApplicationPrivileges(), equalTo(Collections.emptyMap())); + } + + private HasPrivilegesResponse serializeAndDeserialize(HasPrivilegesResponse original, Version version) throws IOException { + logger.info("Test serialize/deserialize with version {}", version); + final BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(version); + original.writeTo(out); + + final HasPrivilegesResponse copy = new HasPrivilegesResponse(); + final StreamInput in = out.bytes().streamInput(); + in.setVersion(version); + copy.readFrom(in); + assertThat(in.read(), equalTo(-1)); + return copy; + } + + private HasPrivilegesResponse randomResponse() { + final Map cluster = new HashMap<>(); + for (String priv : randomArray(1, 6, String[]::new, () -> randomAlphaOfLengthBetween(3, 12))) { + cluster.put(priv, randomBoolean()); + } + final Collection index = randomResourcePrivileges(); + final Map> application = new HashMap<>(); + for (String app : randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(3, 6).toLowerCase(Locale.ROOT))) { + application.put(app, randomResourcePrivileges()); + } + return new HasPrivilegesResponse(randomBoolean(), cluster, index, application); + } + + private Collection randomResourcePrivileges() { + final Collection list = new ArrayList<>(); + for (String resource : randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(2, 6))) { + final Map privileges = new HashMap<>(); + for (String priv : randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(3, 8))) { + privileges.put(priv, randomBoolean()); + } + list.add(new HasPrivilegesResponse.ResourcePrivileges(resource, privileges)); + } + return list; + } + +} diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java index 38857e2170de4..dca2f37f3f224 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java @@ -80,9 +80,8 @@ public void cleanDirectory() throws Exception { bitsetFilterCache.close(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32457") public void testSearch() throws Exception { - IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig()); + IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig().setMergePolicy(newLogMergePolicy(random()))); Document document = new Document(); document.add(new StringField("field", "value1", Field.Store.NO)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java index c26968ce54aa9..dccbd14c04704 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java @@ -76,7 +76,7 @@ import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.junit.After; import org.junit.Before; import org.mockito.ArgumentCaptor; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 9cb5e25c5b8d1..9972fc7b74bcf 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -94,6 +94,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; +import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; import org.elasticsearch.xpack.core.security.user.SystemUser; @@ -147,6 +148,7 @@ public void testIsReserved() { assertThat(ReservedRolesStore.isReserved(XPackUser.ROLE_NAME), is(true)); assertThat(ReservedRolesStore.isReserved(LogstashSystemUser.ROLE_NAME), is(true)); assertThat(ReservedRolesStore.isReserved(BeatsSystemUser.ROLE_NAME), is(true)); + 
assertThat(ReservedRolesStore.isReserved(APMSystemUser.ROLE_NAME), is(true)); } public void testIngestAdminRole() { @@ -628,6 +630,30 @@ public void testBeatsSystemRole() { is(false)); } + public void testAPMSystemRole() { + final TransportRequest request = mock(TransportRequest.class); + + RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor(APMSystemUser.ROLE_NAME); + assertNotNull(roleDescriptor); + assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); + + Role APMSystemRole = Role.builder(roleDescriptor, null).build(); + assertThat(APMSystemRole.cluster().check(ClusterHealthAction.NAME, request), is(true)); + assertThat(APMSystemRole.cluster().check(ClusterStateAction.NAME, request), is(true)); + assertThat(APMSystemRole.cluster().check(ClusterStatsAction.NAME, request), is(true)); + assertThat(APMSystemRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false)); + assertThat(APMSystemRole.cluster().check(ClusterRerouteAction.NAME, request), is(false)); + assertThat(APMSystemRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); + assertThat(APMSystemRole.cluster().check(MonitoringBulkAction.NAME, request), is(true)); + + assertThat(APMSystemRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); + + assertThat(APMSystemRole.indices().allowedIndicesMatcher(IndexAction.NAME).test("foo"), is(false)); + assertThat(APMSystemRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(".reporting"), is(false)); + assertThat(APMSystemRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), + is(false)); + } + public void testMachineLearningAdminRole() { final TransportRequest request = mock(TransportRequest.class); diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/security/UserTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/UserTests.java similarity index 52% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/security/UserTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/UserTests.java index 2e3c67131df74..d652575bb9fc4 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/security/UserTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/UserTests.java @@ -1,25 +1,12 @@ /* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
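The new APM system role test above follows the same recipe as the other reserved-role tests in this file: resolve the reserved `RoleDescriptor`, build an effective `Role` from it, and probe individual cluster and index privileges. A compact sketch of that recipe using only classes already imported by the test (the expected outcomes are the ones asserted above):

```java
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.core.security.authz.permission.Role;
import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore;
import org.elasticsearch.xpack.core.security.user.APMSystemUser;

import static org.mockito.Mockito.mock;

// Resolve the reserved descriptor and build the effective role to probe.
RoleDescriptor descriptor = new ReservedRolesStore().roleDescriptor(APMSystemUser.ROLE_NAME);
Role role = Role.builder(descriptor, null).build();

TransportRequest request = mock(TransportRequest.class);
boolean canCheckClusterHealth = role.cluster().check(ClusterHealthAction.NAME, request);       // true per the test
boolean canWriteArbitraryIndex = role.indices().allowedIndicesMatcher(IndexAction.NAME).test("foo"); // false per the test
```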
*/ - -package org.elasticsearch.protocol.xpack.security; +package org.elasticsearch.xpack.core.security.user; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.user.User; import java.util.Collections; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/PemUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/PemUtilsTests.java index b82275a883311..3134d42ce3621 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/PemUtilsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/PemUtilsTests.java @@ -32,6 +32,16 @@ public void testReadPKCS8RsaKey() throws Exception { assertThat(privateKey, equalTo(key)); } + public void testReadPKCS8RsaKeyWithBagAttrs() throws Exception { + Key key = getKeyFromKeystore("RSA"); + assertThat(key, notNullValue()); + assertThat(key, instanceOf(PrivateKey.class)); + PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath + ("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_with_bagattrs.pem"), ""::toCharArray); + assertThat(privateKey, notNullValue()); + assertThat(privateKey, equalTo(key)); + } + public void testReadPKCS8DsaKey() throws Exception { Key key = getKeyFromKeystore("DSA"); assertThat(key, notNullValue()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java index 3e36550e46f2b..df25b2fa1261b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java @@ -78,7 +78,6 @@ public void cleanup() throws Exception { /** * Tests reloading a keystore that is used in the KeyManager of SSLContext */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32124") public void testReloadingKeyStore() throws Exception { assumeFalse("Can't run in a FIPS JVM", inFipsJvm()); final Path tempDir = createTempDir(); @@ -192,7 +191,6 @@ public void testPEMKeyConfigReloading() throws Exception { * Tests the reloading of SSLContext when the trust store is modified. 
The same store is used as a TrustStore (for the * reloadable SSLContext used in the HTTPClient) and as a KeyStore for the MockWebServer */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32124") public void testReloadingTrustStore() throws Exception { assumeFalse("Can't run in a FIPS JVM", inFipsJvm()); Path tempDir = createTempDir(); @@ -479,7 +477,9 @@ private static MockWebServer getSslServer(Path keyStorePath, String keyStorePass try (InputStream is = Files.newInputStream(keyStorePath)) { keyStore.load(is, keyStorePass.toCharArray()); } - final SSLContext sslContext = new SSLContextBuilder().loadKeyMaterial(keyStore, keyStorePass.toCharArray()) + // TODO Revisit TLS1.2 pinning when TLS1.3 is fully supported + // https://github.com/elastic/elasticsearch/issues/32276 + final SSLContext sslContext = new SSLContextBuilder().useProtocol("TLSv1.2").loadKeyMaterial(keyStore, keyStorePass.toCharArray()) .build(); MockWebServer server = new MockWebServer(sslContext, false); server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); @@ -493,7 +493,9 @@ private static MockWebServer getSslServer(Path keyPath, Path certPath, String pa keyStore.load(null, password.toCharArray()); keyStore.setKeyEntry("testnode_ec", PemUtils.readPrivateKey(keyPath, password::toCharArray), password.toCharArray(), CertParsingUtils.readCertificates(Collections.singletonList(certPath))); - final SSLContext sslContext = new SSLContextBuilder().loadKeyMaterial(keyStore, password.toCharArray()) + // TODO Revisit TLS1.2 pinning when TLS1.3 is fully supported + // https://github.com/elastic/elasticsearch/issues/32276 + final SSLContext sslContext = new SSLContextBuilder().useProtocol("TLSv1.2").loadKeyMaterial(keyStore, password.toCharArray()) .build(); MockWebServer server = new MockWebServer(sslContext, false); server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); diff --git a/x-pack/plugin/core/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_with_bagattrs.pem b/x-pack/plugin/core/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_with_bagattrs.pem new file mode 100644 index 0000000000000..ce8299cd070fc --- /dev/null +++ b/x-pack/plugin/core/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_with_bagattrs.pem @@ -0,0 +1,32 @@ +Bag Attributes + friendlyName: testnode_rsa + localKeyID: 54 69 6D 65 20 31 35 32 35 33 33 36 38 32 39 33 39 37 +Key Attributes: +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDesZnVBuxbT4y7 +KtIuYx8MUq0sGQgVbxXSBG66sWDU9Qoo1HUyra0xXCONgRMBT9RjSIpk7OOC9g8q +ENNgFO179YdHVkrgJhW/tNBf+C0VAb+B79zu7SwtyH2nt9t378dmItL+sERkMiiG ++BS/O+cDz44hifDiS7Eqj/mJugAhLjWSUyD+UBObxXvUsxjryKeG3vX9mRCgAcqB +xH3PjI1i9DVaoobwMbwpE5eW2WXexOspuXnMmGfrrR6z/VmdHqe/C3rGdJOX+Y0c +yOR+/Vuzisn+nLeo/GJx2hIif8rKiNRyAdUXfx+4DLYJBN2NUbl9aP2LP6ZC8ubf +6qwhhB0XAgMBAAECggEBAKuzP6qSNfaJNTayY2/EmRHFRSP1ANiV17sgE8f6L3DC +pdypQtuaMSkXo4nc9SxTwqvyKFJ8m0ZENZj3dCJmwFyNCIqmLAD7HFW9MdRs40WJ +HYEv0aaeUyvRo6CHD74/r/w96XTZr0GZssmtyUFRDGNRyoJter7gIW9xprLcKHFr +YTmdaAXbOm5W/K3844EBouTYzYnZYWQjB3jT/g5dIic3AtLb5YfGlpaXXb74xTOU +BqY1uKonGiDCh0aXXRl2Ucyre6FWslNNy4cAAXm6/5GT6iMo7wDXQftvtyK2IszP +IFcOG6xcAaJjgZ5wvM3ch0qNhQi4vL7c4Bm5JS9meoECgYEA88ItaVrfm2osX/6/ +fA8wYxxYU5RQRyOgLuzBXoRkISynLJaLVj2gFOQxVQeUK++xK6R182RQatOJcWFT +WwmIL3CchCwnnXgPvMc51iFKY94DbdvrRatP8c5sSk7IQlpS3aVa7f7DCqexggr5 +3PYysuiLirL+n9I1oZiUxpsS6/cCgYEA6eCcDshQzb7UQfWy//BRMp7u6DDuq+54 
+38kJIFsPX0/CGyWsiFYEac8VH7jaGof99j7Zuebeb50TX57ZCBEK2LaHe474ggkY +GGSoo3VWBn44A1P5ADaRGRwJ4/u79qAg0ldnyxFHWtW+Wbn11DoOg40rl+DOnFBJ +W+bWJn4az+ECgYEAzWduDt5lmLfiRs4LG4ZNFudWwq8y6o9ptsEIvRXArnfLM3Z0 +Waq6T4Bu1aD6Sf/EAuul/QAmB67TnbgOnqMsoBU7vuDaTQZT9JbI9Ni+r+Lwbs2n +tuCCEFgKxp8Wf1tPgriJJA3O2xauLNAE9x57YGk21Ry6FYD0coR5sdYRHscCgYEA +lGQM4Fw82K5RoqAwOK/T9RheYTha1v/x9ZtqjPr53/GNKQhYVhCtsCzSLFRvHhJX +EpyCLK/NRmgVWMBC2BloFmSJxd3K00bN4PxM+5mBQZFoHMR04qu8mH/vzpV0h2DG +Mm9+zZti+MFRi0CwNz2248T4ed8LeKaARS1LhxTQEkECgYBFsPNkfGWyP4zsgzFs +3tMgXnIgl3Lh+vnEIzVakASf3RZrSucJhA713u5L9YB64wPdVJp4YZIoEmHebP9J +Jt1f9ghcWk6ffUVBQJPmWuRbB/BU8SI+kgtf50Jnizbfm5qoQEt2UdGUbwU3P1+t +z4SnBvIZ3b2inN+Hwdm5onOBlw== +-----END PRIVATE KEY----- diff --git a/x-pack/plugin/deprecation/build.gradle b/x-pack/plugin/deprecation/build.gradle index 3746287d615ff..d89eb62e88492 100644 --- a/x-pack/plugin/deprecation/build.gradle +++ b/x-pack/plugin/deprecation/build.gradle @@ -10,7 +10,7 @@ esplugin { archivesBaseName = 'x-pack-deprecation' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + compileOnly "org.elasticsearch.plugin:x-pack-core:${version}" } run { diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index 0f54784a33f46..d496eea2f0d13 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -7,10 +7,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction; @@ -23,153 +20,9 @@ import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; public class IndexDeprecationChecksTests extends ESTestCase { - - private static void assertSettingsAndIssue(String key, String value, DeprecationIssue expected) { - IndexMetaData indexMetaData = IndexMetaData.builder("test") - .settings(settings(Version.V_5_6_0) - .put(key, value)) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - List issues = DeprecationInfoAction.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); - assertEquals(singletonList(expected), issues); - } - - public void testCoerceBooleanDeprecation() throws IOException { - XContentBuilder mapping = XContentFactory.jsonBuilder(); - mapping.startObject(); { - mapping.startObject("properties"); { - mapping.startObject("my_boolean"); { - mapping.field("type", "boolean"); - } - mapping.endObject(); - mapping.startObject("my_object"); { - mapping.startObject("properties"); { - mapping.startObject("my_inner_boolean"); { - mapping.field("type", "boolean"); - } - mapping.endObject(); - mapping.startObject("my_text"); { - mapping.field("type", "text"); - mapping.startObject("fields"); { - mapping.startObject("raw"); { - mapping.field("type", "boolean"); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - 
mapping.endObject(); - } - mapping.endObject(); - - IndexMetaData indexMetaData = IndexMetaData.builder("test") - .putMapping("testBooleanCoercion", Strings.toString(mapping)) - .settings(settings(Version.V_5_6_0)) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - - DeprecationIssue expected = new DeprecationIssue(DeprecationIssue.Level.INFO, - "Coercion of boolean fields", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_mappings_changes.html#_coercion_of_boolean_fields", - "[[type: testBooleanCoercion, field: my_boolean], [type: testBooleanCoercion, field: my_inner_boolean]," + - " [type: testBooleanCoercion, field: my_text, multifield: raw]]"); - List issues = DeprecationInfoAction.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); - assertEquals(singletonList(expected), issues); - } - - public void testMatchMappingTypeCheck() throws IOException { - XContentBuilder mapping = XContentFactory.jsonBuilder(); - mapping.startObject(); { - mapping.startArray("dynamic_templates"); - { - mapping.startObject(); - { - mapping.startObject("integers"); - { - mapping.field("match_mapping_type", "UNKNOWN_VALUE"); - mapping.startObject("mapping"); - { - mapping.field("type", "integer"); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endArray(); - } - mapping.endObject(); - - IndexMetaData indexMetaData = IndexMetaData.builder("test") - .putMapping("test", Strings.toString(mapping)) - .settings(settings(Version.V_5_6_0)) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - - DeprecationIssue expected = new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "Unrecognized match_mapping_type options not silently ignored", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_mappings_changes.html#_unrecognized_literal_match_mapping_type_literal_options_not_silently_ignored", - "[type: test, dynamicFieldDefinitionintegers, unknown match_mapping_type[UNKNOWN_VALUE]]"); - List issues = DeprecationInfoAction.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); - assertEquals(singletonList(expected), issues); - } - - public void testBaseSimilarityDefinedCheck() { - assertSettingsAndIssue("index.similarity.base.type", "classic", - new DeprecationIssue(DeprecationIssue.Level.WARNING, - "The base similarity is now ignored as coords and query normalization have been removed." + - "If provided, this setting will be ignored and issue a deprecation warning", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_settings_changes.html#_similarity_settings", null)); - } - - public void testIndexStoreTypeCheck() { - assertSettingsAndIssue("index.store.type", "niofs", - new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "The default index.store.type has been removed. If you were using it, " + - "we advise that you simply remove it from your index settings and Elasticsearch" + - "will use the best store implementation for your operating system.", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_settings_changes.html#_store_settings", null)); - } - public void testStoreThrottleSettingsCheck() { - assertSettingsAndIssue("index.store.throttle.max_bytes_per_sec", "32", - new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "index.store.throttle settings are no longer recognized. 
these settings should be removed", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_settings_changes.html#_store_throttling_settings", - "present settings: [index.store.throttle.max_bytes_per_sec]")); - assertSettingsAndIssue("index.store.throttle.type", "none", - new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "index.store.throttle settings are no longer recognized. these settings should be removed", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_settings_changes.html#_store_throttling_settings", - "present settings: [index.store.throttle.type]")); - } - - public void testSharedFileSystemSettingsCheck() { - assertSettingsAndIssue("index.shared_filesystem", "true", - new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "[index.shared_filesystem] setting should be removed", - "https://www.elastic.co/guide/en/elasticsearch/reference/6.0/" + - "breaking_60_indices_changes.html#_shadow_replicas_have_been_removed", null)); - } - public void testDelimitedPayloadFilterCheck() throws IOException { Settings settings = settings( - VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1))) + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1))) .put("index.analysis.filter.my_delimited_payload_filter.type", "delimited_payload_filter") .put("index.analysis.filter.my_delimited_payload_filter.delimiter", "^") .put("index.analysis.filter.my_delimited_payload_filter.encoding", "identity").build(); @@ -183,4 +36,4 @@ public void testDelimitedPayloadFilterCheck() throws IOException { List issues = DeprecationInfoAction.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); assertEquals(singletonList(expected), issues); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/graph/build.gradle b/x-pack/plugin/graph/build.gradle index 2b0f592b72040..069bfa5fbbe2b 100644 --- a/x-pack/plugin/graph/build.gradle +++ b/x-pack/plugin/graph/build.gradle @@ -10,7 +10,8 @@ esplugin { archivesBaseName = 'x-pack-graph' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java index 4eb136040e988..25f2511fbc0a0 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java @@ -24,6 +24,15 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.protocol.xpack.graph.Connection; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.protocol.xpack.graph.Vertex; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import 
org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder; @@ -39,16 +48,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.graph.action.Connection; -import org.elasticsearch.xpack.core.graph.action.Connection.ConnectionId; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest.TermBoost; -import org.elasticsearch.xpack.core.graph.action.GraphExploreResponse; -import org.elasticsearch.xpack.core.graph.action.Hop; -import org.elasticsearch.xpack.core.graph.action.Vertex; -import org.elasticsearch.xpack.core.graph.action.Vertex.VertexId; -import org.elasticsearch.xpack.core.graph.action.VertexRequest; import java.util.ArrayList; import java.util.HashMap; diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java index 3f11d0c72bd44..778eb261a0705 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java @@ -12,14 +12,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest.TermBoost; -import org.elasticsearch.xpack.core.graph.action.Hop; -import org.elasticsearch.xpack.core.graph.action.VertexRequest; import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; diff --git a/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/test/GraphTests.java b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/test/GraphTests.java index 5bebef3d2d48c..a58d8e8a8b0c6 100644 --- a/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/test/GraphTests.java +++ b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/test/GraphTests.java @@ -17,6 +17,11 @@ import org.elasticsearch.index.query.ScriptQueryBuilder; import org.elasticsearch.license.LicenseService; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.protocol.xpack.graph.Hop; +import 
org.elasticsearch.protocol.xpack.graph.Vertex; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -24,12 +29,7 @@ import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.graph.Graph; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; import org.elasticsearch.xpack.core.graph.action.GraphExploreRequestBuilder; -import org.elasticsearch.xpack.core.graph.action.GraphExploreResponse; -import org.elasticsearch.xpack.core.graph.action.Hop; -import org.elasticsearch.xpack.core.graph.action.Vertex; -import org.elasticsearch.xpack.core.graph.action.VertexRequest; import java.util.Collection; import java.util.Collections; diff --git a/x-pack/plugin/logstash/build.gradle b/x-pack/plugin/logstash/build.gradle index 2e158a90ac7ab..1057a1c8526fc 100644 --- a/x-pack/plugin/logstash/build.gradle +++ b/x-pack/plugin/logstash/build.gradle @@ -10,9 +10,9 @@ esplugin { archivesBaseName = 'x-pack-logstash' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') - } run { diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index 3602e1b359ec2..7c3594a06cfdd 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -40,7 +40,8 @@ compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try, compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // This should not be here testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') @@ -103,9 +104,19 @@ task internalClusterTest(type: RandomizedTestingTask, include '**/*IT.class' systemProperty 'es.set.netty.runtime.available.processors', 'false' } + check.dependsOn internalClusterTest internalClusterTest.mustRunAfter test +// add all sub-projects of the qa sub-project +gradle.projectsEvaluated { + project.subprojects + .find { it.path == project.path + ":qa" } + .subprojects + .findAll { it.path.startsWith(project.path + ":qa") } + .each { check.dependsOn it.check } +} + // also add an "alias" task to make typing on the command line easier task icTest { dependsOn internalClusterTest diff --git a/x-pack/plugin/ml/log-structure-finder/build.gradle b/x-pack/plugin/ml/log-structure-finder/build.gradle index 9048a1c46860c..f5dff6dc8464d 100644 --- a/x-pack/plugin/ml/log-structure-finder/build.gradle +++ b/x-pack/plugin/ml/log-structure-finder/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' archivesBaseName = 'x-pack-log-structure-finder' @@ -31,6 +29,6 @@ artifacts { forbiddenApisMain { // log-structure-finder does not depend on server, so cannot forbid 
server methods - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactory.java deleted file mode 100644 index cb9e6537252cd..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactory.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -import org.supercsv.prefs.CsvPreference; - -import java.io.IOException; -import java.util.List; - -public class CsvLogStructureFinderFactory implements LogStructureFinderFactory { - - /** - * Rules are: - * - The file must be valid CSV - * - It must contain at least two complete records - * - There must be at least two fields per record (otherwise files with no commas could be treated as CSV!) - * - Every CSV record except the last must have the same number of fields - * The reason the last record is allowed to have fewer fields than the others is that - * it could have been truncated when the file was sampled. - */ - @Override - public boolean canCreateFromSample(List explanation, String sample) { - return SeparatedValuesLogStructureFinder.canCreateFromSample(explanation, sample, 2, CsvPreference.EXCEL_PREFERENCE, "CSV"); - } - - @Override - public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) - throws IOException { - return SeparatedValuesLogStructureFinder.makeSeparatedValuesLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, - CsvPreference.EXCEL_PREFERENCE, false); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinder.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinder.java similarity index 97% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinder.java rename to x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinder.java index fd9d34096b2ed..2f7bb41d0bae7 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinder.java +++ b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinder.java @@ -29,17 +29,16 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -public class SeparatedValuesLogStructureFinder implements LogStructureFinder { +public class DelimitedLogStructureFinder implements LogStructureFinder { private static final int MAX_LEVENSHTEIN_COMPARISONS = 100; private final List sampleMessages; private final LogStructure structure; - static SeparatedValuesLogStructureFinder makeSeparatedValuesLogStructureFinder(List explanation, String sample, - 
String charsetName, Boolean hasByteOrderMarker, - CsvPreference csvPreference, boolean trimFields) - throws IOException { + static DelimitedLogStructureFinder makeDelimitedLogStructureFinder(List explanation, String sample, String charsetName, + Boolean hasByteOrderMarker, CsvPreference csvPreference, + boolean trimFields) throws IOException { Tuple>, List> parsed = readRows(sample, csvPreference); List> rows = parsed.v1(); @@ -73,13 +72,14 @@ static SeparatedValuesLogStructureFinder makeSeparatedValuesLogStructureFinder(L String preamble = Pattern.compile("\n").splitAsStream(sample).limit(lineNumbers.get(1)).collect(Collectors.joining("\n", "", "\n")); char delimiter = (char) csvPreference.getDelimiterChar(); - LogStructure.Builder structureBuilder = new LogStructure.Builder(LogStructure.Format.fromSeparator(delimiter)) + LogStructure.Builder structureBuilder = new LogStructure.Builder(LogStructure.Format.DELIMITED) .setCharset(charsetName) .setHasByteOrderMarker(hasByteOrderMarker) .setSampleStart(preamble) .setNumLinesAnalyzed(lineNumbers.get(lineNumbers.size() - 1)) .setNumMessagesAnalyzed(sampleRecords.size()) .setHasHeaderRow(isHeaderInFile) + .setDelimiter(delimiter) .setInputFields(Arrays.stream(headerWithNamedBlanks).collect(Collectors.toList())); if (trimFields) { @@ -131,10 +131,10 @@ static SeparatedValuesLogStructureFinder makeSeparatedValuesLogStructureFinder(L .setExplanation(explanation) .build(); - return new SeparatedValuesLogStructureFinder(sampleMessages, structure); + return new DelimitedLogStructureFinder(sampleMessages, structure); } - private SeparatedValuesLogStructureFinder(List sampleMessages, LogStructure structure) { + private DelimitedLogStructureFinder(List sampleMessages, LogStructure structure) { this.sampleMessages = Collections.unmodifiableList(sampleMessages); this.structure = structure; } diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactory.java new file mode 100644 index 0000000000000..3e4c3ea225cf8 --- /dev/null +++ b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactory.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.logstructurefinder; + +import org.supercsv.prefs.CsvPreference; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; + +public class DelimitedLogStructureFinderFactory implements LogStructureFinderFactory { + + private final CsvPreference csvPreference; + private final int minFieldsPerRow; + private final boolean trimFields; + + DelimitedLogStructureFinderFactory(char delimiter, int minFieldsPerRow, boolean trimFields) { + csvPreference = new CsvPreference.Builder('"', delimiter, "\n").build(); + this.minFieldsPerRow = minFieldsPerRow; + this.trimFields = trimFields; + } + + /** + * Rules are: + * - It must contain at least two complete records + * - There must be a minimum number of fields per record (otherwise files with no commas could be treated as CSV!) 
+ * - Every record except the last must have the same number of fields + * The reason the last record is allowed to have fewer fields than the others is that + * it could have been truncated when the file was sampled. + */ + @Override + public boolean canCreateFromSample(List explanation, String sample) { + String formatName; + switch ((char) csvPreference.getDelimiterChar()) { + case ',': + formatName = "CSV"; + break; + case '\t': + formatName = "TSV"; + break; + default: + formatName = Character.getName(csvPreference.getDelimiterChar()).toLowerCase(Locale.ROOT) + " delimited values"; + break; + } + return DelimitedLogStructureFinder.canCreateFromSample(explanation, sample, minFieldsPerRow, csvPreference, formatName); + } + + @Override + public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) + throws IOException { + return DelimitedLogStructureFinder.makeDelimitedLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, + csvPreference, trimFields); + } +} diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructure.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructure.java index 64a00d20899c1..ea8fe37e62f9f 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructure.java +++ b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructure.java @@ -27,37 +27,14 @@ public class LogStructure implements ToXContentObject { public enum Format { - JSON, XML, CSV, TSV, SEMI_COLON_SEPARATED_VALUES, PIPE_SEPARATED_VALUES, SEMI_STRUCTURED_TEXT; - - public Character separator() { - switch (this) { - case JSON: - case XML: - return null; - case CSV: - return ','; - case TSV: - return '\t'; - case SEMI_COLON_SEPARATED_VALUES: - return ';'; - case PIPE_SEPARATED_VALUES: - return '|'; - case SEMI_STRUCTURED_TEXT: - return null; - default: - throw new IllegalStateException("enum value [" + this + "] missing from switch."); - } - } + JSON, XML, DELIMITED, SEMI_STRUCTURED_TEXT; public boolean supportsNesting() { switch (this) { case JSON: case XML: return true; - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: + case DELIMITED: case SEMI_STRUCTURED_TEXT: return false; default: @@ -69,10 +46,7 @@ public boolean isStructured() { switch (this) { case JSON: case XML: - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: + case DELIMITED: return true; case SEMI_STRUCTURED_TEXT: return false; @@ -85,10 +59,7 @@ public boolean isSemiStructured() { switch (this) { case JSON: case XML: - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: + case DELIMITED: return false; case SEMI_STRUCTURED_TEXT: return true; @@ -97,38 +68,6 @@ public boolean isSemiStructured() { } } - public boolean isSeparatedValues() { - switch (this) { - case JSON: - case XML: - return false; - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: - return true; - case SEMI_STRUCTURED_TEXT: - return false; - default: - throw new IllegalStateException("enum value [" + this + "] missing from switch."); - } - } - - public static Format fromSeparator(char separator) { - switch (separator) { - case ',': - return CSV; - case '\t': - return TSV; - case ';': - return 
SEMI_COLON_SEPARATED_VALUES; - case '|': - return PIPE_SEPARATED_VALUES; - default: - throw new IllegalArgumentException("No known format has separator [" + separator + "]"); - } - } - public static Format fromString(String name) { return valueOf(name.trim().toUpperCase(Locale.ROOT)); } @@ -149,7 +88,7 @@ public String toString() { static final ParseField EXCLUDE_LINES_PATTERN = new ParseField("exclude_lines_pattern"); static final ParseField INPUT_FIELDS = new ParseField("input_fields"); static final ParseField HAS_HEADER_ROW = new ParseField("has_header_row"); - static final ParseField SEPARATOR = new ParseField("separator"); + static final ParseField DELIMITER = new ParseField("delimiter"); static final ParseField SHOULD_TRIM_FIELDS = new ParseField("should_trim_fields"); static final ParseField GROK_PATTERN = new ParseField("grok_pattern"); static final ParseField TIMESTAMP_FIELD = new ParseField("timestamp_field"); @@ -171,7 +110,7 @@ public String toString() { PARSER.declareString(Builder::setExcludeLinesPattern, EXCLUDE_LINES_PATTERN); PARSER.declareStringArray(Builder::setInputFields, INPUT_FIELDS); PARSER.declareBoolean(Builder::setHasHeaderRow, HAS_HEADER_ROW); - PARSER.declareString((p, c) -> p.setSeparator(c.charAt(0)), SEPARATOR); + PARSER.declareString((p, c) -> p.setDelimiter(c.charAt(0)), DELIMITER); PARSER.declareBoolean(Builder::setShouldTrimFields, SHOULD_TRIM_FIELDS); PARSER.declareString(Builder::setGrokPattern, GROK_PATTERN); PARSER.declareString(Builder::setTimestampField, TIMESTAMP_FIELD); @@ -191,7 +130,7 @@ public String toString() { private final String excludeLinesPattern; private final List inputFields; private final Boolean hasHeaderRow; - private final Character separator; + private final Character delimiter; private final Boolean shouldTrimFields; private final String grokPattern; private final List timestampFormats; @@ -202,7 +141,7 @@ public String toString() { public LogStructure(int numLinesAnalyzed, int numMessagesAnalyzed, String sampleStart, String charset, Boolean hasByteOrderMarker, Format format, String multilineStartPattern, String excludeLinesPattern, List inputFields, - Boolean hasHeaderRow, Character separator, Boolean shouldTrimFields, String grokPattern, String timestampField, + Boolean hasHeaderRow, Character delimiter, Boolean shouldTrimFields, String grokPattern, String timestampField, List timestampFormats, boolean needClientTimezone, Map mappings, List explanation) { @@ -216,7 +155,7 @@ public LogStructure(int numLinesAnalyzed, int numMessagesAnalyzed, String sample this.excludeLinesPattern = excludeLinesPattern; this.inputFields = (inputFields == null) ? 
null : Collections.unmodifiableList(new ArrayList<>(inputFields)); this.hasHeaderRow = hasHeaderRow; - this.separator = separator; + this.delimiter = delimiter; this.shouldTrimFields = shouldTrimFields; this.grokPattern = grokPattern; this.timestampField = timestampField; @@ -266,8 +205,8 @@ public Boolean getHasHeaderRow() { return hasHeaderRow; } - public Character getSeparator() { - return separator; + public Character getDelimiter() { + return delimiter; } public Boolean getShouldTrimFields() { @@ -322,8 +261,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (hasHeaderRow != null) { builder.field(HAS_HEADER_ROW.getPreferredName(), hasHeaderRow.booleanValue()); } - if (separator != null) { - builder.field(SEPARATOR.getPreferredName(), String.valueOf(separator)); + if (delimiter != null) { + builder.field(DELIMITER.getPreferredName(), String.valueOf(delimiter)); } if (shouldTrimFields != null) { builder.field(SHOULD_TRIM_FIELDS.getPreferredName(), shouldTrimFields.booleanValue()); @@ -349,7 +288,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public int hashCode() { return Objects.hash(numLinesAnalyzed, numMessagesAnalyzed, sampleStart, charset, hasByteOrderMarker, format, - multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, separator, shouldTrimFields, grokPattern, timestampField, + multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, timestampField, timestampFormats, needClientTimezone, mappings, explanation); } @@ -376,7 +315,7 @@ public boolean equals(Object other) { Objects.equals(this.excludeLinesPattern, that.excludeLinesPattern) && Objects.equals(this.inputFields, that.inputFields) && Objects.equals(this.hasHeaderRow, that.hasHeaderRow) && - Objects.equals(this.separator, that.separator) && + Objects.equals(this.delimiter, that.delimiter) && Objects.equals(this.shouldTrimFields, that.shouldTrimFields) && Objects.equals(this.grokPattern, that.grokPattern) && Objects.equals(this.timestampField, that.timestampField) && @@ -397,7 +336,7 @@ public static class Builder { private String excludeLinesPattern; private List inputFields; private Boolean hasHeaderRow; - private Character separator; + private Character delimiter; private Boolean shouldTrimFields; private String grokPattern; private String timestampField; @@ -441,7 +380,6 @@ public Builder setHasByteOrderMarker(Boolean hasByteOrderMarker) { public Builder setFormat(Format format) { this.format = Objects.requireNonNull(format); - this.separator = format.separator(); return this; } @@ -465,13 +403,13 @@ public Builder setHasHeaderRow(Boolean hasHeaderRow) { return this; } - public Builder setShouldTrimFields(Boolean shouldTrimFields) { - this.shouldTrimFields = shouldTrimFields; + public Builder setDelimiter(Character delimiter) { + this.delimiter = delimiter; return this; } - public Builder setSeparator(Character separator) { - this.separator = separator; + public Builder setShouldTrimFields(Boolean shouldTrimFields) { + this.shouldTrimFields = shouldTrimFields; return this; } @@ -542,28 +480,22 @@ public LogStructure build() { if (hasHeaderRow != null) { throw new IllegalArgumentException("Has header row may not be specified for [" + format + "] structures."); } - if (separator != null) { - throw new IllegalArgumentException("Separator may not be specified for [" + format + "] structures."); + if (delimiter != null) { + throw new IllegalArgumentException("Delimiter may not 
be specified for [" + format + "] structures."); } if (grokPattern != null) { throw new IllegalArgumentException("Grok pattern may not be specified for [" + format + "] structures."); } break; - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: + case DELIMITED: if (inputFields == null || inputFields.isEmpty()) { throw new IllegalArgumentException("Input fields must be specified for [" + format + "] structures."); } if (hasHeaderRow == null) { throw new IllegalArgumentException("Has header row must be specified for [" + format + "] structures."); } - Character expectedSeparator = format.separator(); - assert expectedSeparator != null; - if (expectedSeparator.equals(separator) == false) { - throw new IllegalArgumentException("Separator must be [" + expectedSeparator + "] for [" + format + - "] structures."); + if (delimiter == null) { + throw new IllegalArgumentException("Delimiter must be specified for [" + format + "] structures."); } if (grokPattern != null) { throw new IllegalArgumentException("Grok pattern may not be specified for [" + format + "] structures."); @@ -576,8 +508,8 @@ public LogStructure build() { if (hasHeaderRow != null) { throw new IllegalArgumentException("Has header row may not be specified for [" + format + "] structures."); } - if (separator != null) { - throw new IllegalArgumentException("Separator may not be specified for [" + format + "] structures."); + if (delimiter != null) { + throw new IllegalArgumentException("Delimiter may not be specified for [" + format + "] structures."); } if (shouldTrimFields != null) { throw new IllegalArgumentException("Should trim fields may not be specified for [" + format + "] structures."); @@ -607,7 +539,7 @@ public LogStructure build() { } return new LogStructure(numLinesAnalyzed, numMessagesAnalyzed, sampleStart, charset, hasByteOrderMarker, format, - multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, separator, shouldTrimFields, grokPattern, + multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, timestampField, timestampFormats, needClientTimezone, mappings, explanation); } } diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManager.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManager.java index 7f18445e505e3..e747a588dfd84 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManager.java +++ b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManager.java @@ -69,10 +69,10 @@ public final class LogStructureFinderManager { new JsonLogStructureFinderFactory(), new XmlLogStructureFinderFactory(), // ND-JSON will often also be valid (although utterly weird) CSV, so JSON must come before CSV - new CsvLogStructureFinderFactory(), - new TsvLogStructureFinderFactory(), - new SemiColonSeparatedValuesLogStructureFinderFactory(), - new PipeSeparatedValuesLogStructureFinderFactory(), + new DelimitedLogStructureFinderFactory(',', 2, false), + new DelimitedLogStructureFinderFactory('\t', 2, false), + new DelimitedLogStructureFinderFactory(';', 4, false), + new DelimitedLogStructureFinderFactory('|', 5, true), new TextLogStructureFinderFactory() )); @@ -163,9 +163,15 @@ CharsetMatch findCharset(List explanation, InputStream 
inputStream) thro // deduction algorithms on binary files is very slow as the binary files generally appear to // have very long lines. boolean spaceEncodingContainsZeroByte = false; - byte[] spaceBytes = " ".getBytes(name); - for (int i = 0; i < spaceBytes.length && spaceEncodingContainsZeroByte == false; ++i) { - spaceEncodingContainsZeroByte = (spaceBytes[i] == 0); + Charset charset = Charset.forName(name); + // Some character sets cannot be encoded. These are extremely rare so it's likely that + // they've been chosen based on incorrectly provided binary data. Therefore, err on + // the side of rejecting binary data. + if (charset.canEncode()) { + byte[] spaceBytes = " ".getBytes(charset); + for (int i = 0; i < spaceBytes.length && spaceEncodingContainsZeroByte == false; ++i) { + spaceEncodingContainsZeroByte = (spaceBytes[i] == 0); + } } if (containsZeroBytes && spaceEncodingContainsZeroByte == false) { explanation.add("Character encoding [" + name + "] matched the input with [" + charsetMatch.getConfidence() + diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtils.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtils.java index b1dfee22ee64a..71a68c399910b 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtils.java +++ b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtils.java @@ -21,12 +21,12 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -final class LogStructureUtils { +public final class LogStructureUtils { - static final String DEFAULT_TIMESTAMP_FIELD = "@timestamp"; - static final String MAPPING_TYPE_SETTING = "type"; - static final String MAPPING_FORMAT_SETTING = "format"; - static final String MAPPING_PROPERTIES_SETTING = "properties"; + public static final String DEFAULT_TIMESTAMP_FIELD = "@timestamp"; + public static final String MAPPING_TYPE_SETTING = "type"; + public static final String MAPPING_FORMAT_SETTING = "format"; + public static final String MAPPING_PROPERTIES_SETTING = "properties"; // NUMBER Grok pattern doesn't support scientific notation, so we extend it private static final Grok NUMBER_GROK = new Grok(Grok.getBuiltinPatterns(), "^%{NUMBER}(?:[eE][+-]?[0-3]?[0-9]{1,2})?$"); diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactory.java deleted file mode 100644 index 085599de847f0..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactory.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -import org.supercsv.prefs.CsvPreference; - -import java.io.IOException; -import java.util.List; - -public class PipeSeparatedValuesLogStructureFinderFactory implements LogStructureFinderFactory { - - private static final CsvPreference PIPE_PREFERENCE = new CsvPreference.Builder('"', '|', "\n").build(); - - /** - * Rules are: - * - The file must be valid pipe (|) separated values - * - It must contain at least two complete records - * - There must be at least five fields per record (otherwise files with coincidental - * or no pipe characters could be treated as pipe separated) - * - Every pipe separated value record except the last must have the same number of fields - * The reason the last record is allowed to have fewer fields than the others is that - * it could have been truncated when the file was sampled. - */ - @Override - public boolean canCreateFromSample(List explanation, String sample) { - return SeparatedValuesLogStructureFinder.canCreateFromSample(explanation, sample, 5, PIPE_PREFERENCE, "pipe separated values"); - } - - @Override - public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) - throws IOException { - return SeparatedValuesLogStructureFinder.makeSeparatedValuesLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, - PIPE_PREFERENCE, true); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactory.java deleted file mode 100644 index e0e80fa7465ba..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactory.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -import org.supercsv.prefs.CsvPreference; - -import java.io.IOException; -import java.util.List; - -public class SemiColonSeparatedValuesLogStructureFinderFactory implements LogStructureFinderFactory { - - /** - * Rules are: - * - The file must be valid semi-colon separated values - * - It must contain at least two complete records - * - There must be at least four fields per record (otherwise files with coincidental - * or no semi-colons could be treated as semi-colon separated) - * - Every semi-colon separated value record except the last must have the same number of fields - * The reason the last record is allowed to have fewer fields than the others is that - * it could have been truncated when the file was sampled. 
- */ - @Override - public boolean canCreateFromSample(List explanation, String sample) { - return SeparatedValuesLogStructureFinder.canCreateFromSample(explanation, sample, 4, - CsvPreference.EXCEL_NORTH_EUROPE_PREFERENCE, "semi-colon separated values"); - } - - @Override - public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) - throws IOException { - return SeparatedValuesLogStructureFinder.makeSeparatedValuesLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, - CsvPreference.EXCEL_NORTH_EUROPE_PREFERENCE, false); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactory.java index 733b32346fbed..1b53a33f31ee4 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactory.java +++ b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactory.java @@ -23,13 +23,13 @@ public class TsvLogStructureFinderFactory implements LogStructureFinderFactory { */ @Override public boolean canCreateFromSample(List explanation, String sample) { - return SeparatedValuesLogStructureFinder.canCreateFromSample(explanation, sample, 2, CsvPreference.TAB_PREFERENCE, "TSV"); + return DelimitedLogStructureFinder.canCreateFromSample(explanation, sample, 2, CsvPreference.TAB_PREFERENCE, "TSV"); } @Override public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) throws IOException { - return SeparatedValuesLogStructureFinder.makeSeparatedValuesLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, + return DelimitedLogStructureFinder.makeDelimitedLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, CsvPreference.TAB_PREFERENCE, false); } } diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactoryTests.java deleted file mode 100644 index f53ee008d691e..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactoryTests.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -public class CsvLogStructureFinderFactoryTests extends LogStructureTestCase { - - private LogStructureFinderFactory factory = new CsvLogStructureFinderFactory(); - - // No need to check JSON or XML because they come earlier in the order we check formats - - public void testCanCreateFromSampleGivenCsv() { - - assertTrue(factory.canCreateFromSample(explanation, CSV_SAMPLE)); - } - - public void testCanCreateFromSampleGivenTsv() { - - assertFalse(factory.canCreateFromSample(explanation, TSV_SAMPLE)); - } - - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenPipeSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenText() { - - assertFalse(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactoryTests.java new file mode 100644 index 0000000000000..d9eadbc8f0fde --- /dev/null +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactoryTests.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.logstructurefinder; + +public class DelimitedLogStructureFinderFactoryTests extends LogStructureTestCase { + + private LogStructureFinderFactory csvFactory = new DelimitedLogStructureFinderFactory(',', 2, false); + private LogStructureFinderFactory tsvFactory = new DelimitedLogStructureFinderFactory('\t', 2, false); + private LogStructureFinderFactory semiColonDelimitedfactory = new DelimitedLogStructureFinderFactory(';', 4, false); + private LogStructureFinderFactory pipeDelimitedFactory = new DelimitedLogStructureFinderFactory('|', 5, true); + + // CSV - no need to check JSON or XML because they come earlier in the order we check formats + + public void testCanCreateCsvFromSampleGivenCsv() { + + assertTrue(csvFactory.canCreateFromSample(explanation, CSV_SAMPLE)); + } + + public void testCanCreateCsvFromSampleGivenTsv() { + + assertFalse(csvFactory.canCreateFromSample(explanation, TSV_SAMPLE)); + } + + public void testCanCreateCsvFromSampleGivenSemiColonDelimited() { + + assertFalse(csvFactory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); + } + + public void testCanCreateCsvFromSampleGivenPipeDelimited() { + + assertFalse(csvFactory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); + } + + public void testCanCreateCsvFromSampleGivenText() { + + assertFalse(csvFactory.canCreateFromSample(explanation, TEXT_SAMPLE)); + } + + // TSV - no need to check JSON, XML or CSV because they come earlier in the order we check formats + + public void testCanCreateTsvFromSampleGivenTsv() { + + assertTrue(tsvFactory.canCreateFromSample(explanation, TSV_SAMPLE)); + } + + public void testCanCreateTsvFromSampleGivenSemiColonDelimited() { + + assertFalse(tsvFactory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); + } + + public void testCanCreateTsvFromSampleGivenPipeDelimited() { + + assertFalse(tsvFactory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); + } + + public void testCanCreateTsvFromSampleGivenText() { + + assertFalse(tsvFactory.canCreateFromSample(explanation, TEXT_SAMPLE)); + } + + // Semi-colon delimited - no need to check JSON, XML, CSV or TSV because they come earlier in the order we check formats + + public void testCanCreateSemiColonDelimitedFromSampleGivenSemiColonDelimited() { + + assertTrue(semiColonDelimitedfactory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); + } + + public void testCanCreateSemiColonDelimitedFromSampleGivenPipeDelimited() { + + assertFalse(semiColonDelimitedfactory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); + } + + public void testCanCreateSemiColonDelimitedFromSampleGivenText() { + + assertFalse(semiColonDelimitedfactory.canCreateFromSample(explanation, TEXT_SAMPLE)); + } + + // Pipe delimited - no need to check JSON, XML, CSV, TSV or semi-colon delimited + // values because they come earlier in the order we check formats + + public void testCanCreatePipeDelimitedFromSampleGivenPipeDelimited() { + + assertTrue(pipeDelimitedFactory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); + } + + public void testCanCreatePipeDelimitedFromSampleGivenText() { + + assertFalse(pipeDelimitedFactory.canCreateFromSample(explanation, TEXT_SAMPLE)); + } +} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinderTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderTests.java similarity 
index 65% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinderTests.java rename to x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderTests.java index b62832a0a19cb..57c297cf8d571 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinderTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderTests.java @@ -12,27 +12,27 @@ import java.util.Arrays; import java.util.Collections; -import static org.elasticsearch.xpack.ml.logstructurefinder.SeparatedValuesLogStructureFinder.levenshteinFieldwiseCompareRows; -import static org.elasticsearch.xpack.ml.logstructurefinder.SeparatedValuesLogStructureFinder.levenshteinDistance; +import static org.elasticsearch.xpack.ml.logstructurefinder.DelimitedLogStructureFinder.levenshteinFieldwiseCompareRows; +import static org.elasticsearch.xpack.ml.logstructurefinder.DelimitedLogStructureFinder.levenshteinDistance; import static org.hamcrest.Matchers.arrayContaining; -public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase { +public class DelimitedLogStructureFinderTests extends LogStructureTestCase { - private LogStructureFinderFactory factory = new CsvLogStructureFinderFactory(); + private LogStructureFinderFactory csvFactory = new DelimitedLogStructureFinderFactory(',', 2, false); public void testCreateConfigsGivenCompleteCsv() throws Exception { String sample = "time,message\n" + "2018-05-17T13:41:23,hello\n" + "2018-05-17T13:41:32,hello again\n"; - assertTrue(factory.canCreateFromSample(explanation, sample)); + assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + LogStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); LogStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(LogStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -41,7 +41,7 @@ public void testCreateConfigsGivenCompleteCsv() throws Exception { } assertEquals("^\"?time\"?,\"?message\"?", structure.getExcludeLinesPattern()); assertEquals("^\"?\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("time", "message"), structure.getInputFields()); @@ -55,15 +55,15 @@ public void testCreateConfigsGivenCsvWithIncompleteLastRecord() throws Exception "\"hello\n" + "world\",2018-05-17T13:41:23,1\n" + "\"hello again\n"; // note that this last record is truncated - assertTrue(factory.canCreateFromSample(explanation, sample)); + assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = 
randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + LogStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); LogStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(LogStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -72,7 +72,7 @@ public void testCreateConfigsGivenCsvWithIncompleteLastRecord() throws Exception } assertEquals("^\"?message\"?,\"?time\"?,\"?count\"?", structure.getExcludeLinesPattern()); assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("message", "time", "count"), structure.getInputFields()); @@ -88,15 +88,15 @@ public void testCreateConfigsGivenCsvWithTrailingNulls() throws Exception { "2,2016-12-31 15:15:01,2016-12-31 15:15:09,1,.00,1,N,264,264,2,1,0,0.5,0,0,0.3,1.8,,\n" + "1,2016-12-01 00:00:01,2016-12-01 00:10:22,1,1.60,1,N,163,143,2,9,0.5,0.5,0,0,0.3,10.3,,\n" + "1,2016-12-01 00:00:01,2016-12-01 00:11:01,1,1.40,1,N,164,229,1,9,0.5,0.5,2.05,0,0.3,12.35,,\n"; - assertTrue(factory.canCreateFromSample(explanation, sample)); + assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + LogStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); LogStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(LogStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -108,7 +108,7 @@ public void testCreateConfigsGivenCsvWithTrailingNulls() throws Exception { "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?,\"?\"?,\"?\"?", structure.getExcludeLinesPattern()); assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime", "passenger_count", "trip_distance", @@ -126,15 +126,15 @@ public void testCreateConfigsGivenCsvWithTrailingNullsExceptHeader() throws Exce "2,2016-12-31 15:15:01,2016-12-31 15:15:09,1,.00,1,N,264,264,2,1,0,0.5,0,0,0.3,1.8,,\n" + "1,2016-12-01 00:00:01,2016-12-01 00:10:22,1,1.60,1,N,163,143,2,9,0.5,0.5,0,0,0.3,10.3,,\n" + "1,2016-12-01 00:00:01,2016-12-01 00:11:01,1,1.40,1,N,164,229,1,9,0.5,0.5,2.05,0,0.3,12.35,,\n"; - assertTrue(factory.canCreateFromSample(explanation, sample)); + 
assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + LogStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); LogStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(LogStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -146,7 +146,7 @@ public void testCreateConfigsGivenCsvWithTrailingNullsExceptHeader() throws Exce "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?", structure.getExcludeLinesPattern()); assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime", "passenger_count", "trip_distance", @@ -161,15 +161,15 @@ public void testCreateConfigsGivenCsvWithTimeLastColumn() throws Exception { String sample = "\"pos_id\",\"trip_id\",\"latitude\",\"longitude\",\"altitude\",\"timestamp\"\n" + "\"1\",\"3\",\"4703.7815\",\"1527.4713\",\"359.9\",\"2017-01-19 16:19:04.742113\"\n" + "\"2\",\"3\",\"4703.7815\",\"1527.4714\",\"359.9\",\"2017-01-19 16:19:05.741890\"\n"; - assertTrue(factory.canCreateFromSample(explanation, sample)); + assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + LogStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); LogStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(LogStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -179,7 +179,7 @@ public void testCreateConfigsGivenCsvWithTimeLastColumn() throws Exception { assertEquals("^\"?pos_id\"?,\"?trip_id\"?,\"?latitude\"?,\"?longitude\"?,\"?altitude\"?,\"?timestamp\"?", structure.getExcludeLinesPattern()); assertNull(structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("pos_id", "trip_id", "latitude", "longitude", "altitude", "timestamp"), structure.getInputFields()); @@ -195,8 +195,8 @@ public void testFindHeaderFromSampleGivenHeaderInSample() throws IOException { "2014-06-23 00:00:01Z,JBU,877.5927,farequote\n" + "2014-06-23 00:00:01Z,KLM,1355.4812,farequote\n"; - Tuple header = SeparatedValuesLogStructureFinder.findHeaderFromSample(explanation, - 
SeparatedValuesLogStructureFinder.readRows(withHeader, CsvPreference.EXCEL_PREFERENCE).v1()); + Tuple header = DelimitedLogStructureFinder.findHeaderFromSample(explanation, + DelimitedLogStructureFinder.readRows(withHeader, CsvPreference.EXCEL_PREFERENCE).v1()); assertTrue(header.v1()); assertThat(header.v2(), arrayContaining("time", "airline", "responsetime", "sourcetype")); @@ -208,8 +208,8 @@ public void testFindHeaderFromSampleGivenHeaderNotInSample() throws IOException "2014-06-23 00:00:01Z,JBU,877.5927,farequote\n" + "2014-06-23 00:00:01Z,KLM,1355.4812,farequote\n"; - Tuple header = SeparatedValuesLogStructureFinder.findHeaderFromSample(explanation, - SeparatedValuesLogStructureFinder.readRows(withoutHeader, CsvPreference.EXCEL_PREFERENCE).v1()); + Tuple header = DelimitedLogStructureFinder.findHeaderFromSample(explanation, + DelimitedLogStructureFinder.readRows(withoutHeader, CsvPreference.EXCEL_PREFERENCE).v1()); assertFalse(header.v1()); assertThat(header.v2(), arrayContaining("column1", "column2", "column3", "column4")); @@ -251,43 +251,43 @@ public void testLevenshteinCompareRows() { public void testLineHasUnescapedQuote() { - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,b,c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\",b,c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a,b\",c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a,b,c\"", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,\"b\",c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,b,\"c\"", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,\"b\"\"\",c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,b,\"c\"\"\"", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"\"\"a\",b,c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\"\"\",b,c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a,\"\"b\",c", CsvPreference.EXCEL_PREFERENCE)); - assertTrue(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("between\"words,b,c", CsvPreference.EXCEL_PREFERENCE)); - assertTrue(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("x and \"y\",b,c", CsvPreference.EXCEL_PREFERENCE)); - - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\"\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\tb\"\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\tb\tc\"", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\t\"b\"\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\tb\t\"c\"", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\t\"b\"\"\"\tc", CsvPreference.TAB_PREFERENCE)); - 
assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\tb\t\"c\"\"\"", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"\"\"a\"\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\"\"\"\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\t\"\"b\"\tc", CsvPreference.TAB_PREFERENCE)); - assertTrue(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("between\"words\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertTrue(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("x and \"y\"\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a,b,c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\",b,c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a,b\",c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a,b,c\"", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a,\"b\",c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a,b,\"c\"", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a,\"b\"\"\",c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a,b,\"c\"\"\"", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"\"\"a\",b,c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\"\"\",b,c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a,\"\"b\",c", CsvPreference.EXCEL_PREFERENCE)); + assertTrue(DelimitedLogStructureFinder.lineHasUnescapedQuote("between\"words,b,c", CsvPreference.EXCEL_PREFERENCE)); + assertTrue(DelimitedLogStructureFinder.lineHasUnescapedQuote("x and \"y\",b,c", CsvPreference.EXCEL_PREFERENCE)); + + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\"\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\tb\"\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\tb\tc\"", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a\t\"b\"\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a\tb\t\"c\"", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a\t\"b\"\"\"\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a\tb\t\"c\"\"\"", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"\"\"a\"\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\"\"\"\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\t\"\"b\"\tc", CsvPreference.TAB_PREFERENCE)); + assertTrue(DelimitedLogStructureFinder.lineHasUnescapedQuote("between\"words\tb\tc", 
CsvPreference.TAB_PREFERENCE)); + assertTrue(DelimitedLogStructureFinder.lineHasUnescapedQuote("x and \"y\"\tb\tc", CsvPreference.TAB_PREFERENCE)); } public void testRowContainsDuplicateNonEmptyValues() { - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Collections.singletonList("a"))); - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Collections.singletonList(""))); - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "c"))); - assertTrue(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "a"))); - assertTrue(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "b"))); - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "", ""))); - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("", "a", ""))); + assertFalse(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Collections.singletonList("a"))); + assertFalse(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Collections.singletonList(""))); + assertFalse(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "c"))); + assertTrue(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "a"))); + assertTrue(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "b"))); + assertFalse(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "", ""))); + assertFalse(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("", "a", ""))); } } diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactoryTests.java index 39ef3b9eedbba..cdbffa8259e0c 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactoryTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactoryTests.java @@ -29,14 +29,14 @@ public void testCanCreateFromSampleGivenTsv() { assertFalse(factory.canCreateFromSample(explanation, TSV_SAMPLE)); } - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { + public void testCanCreateFromSampleGivenSemiColonDelimited() { - assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); + assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); } - public void testCanCreateFromSampleGivenPipeSeparatedValues() { + public void testCanCreateFromSampleGivenPipeDelimited() { - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); + assertFalse(factory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); } public void testCanCreateFromSampleGivenText() { diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderTests.java index 2f727747bbff3..917054919dd50 100644 --- 
a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderTests.java @@ -29,7 +29,7 @@ public void testCreateConfigsGivenGoodJson() throws Exception { } assertNull(structure.getExcludeLinesPattern()); assertNull(structure.getMultilineStartPattern()); - assertNull(structure.getSeparator()); + assertNull(structure.getDelimiter()); assertNull(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertNull(structure.getGrokPattern()); diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManagerTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManagerTests.java index 1f8691de8cf65..520a55510c7a4 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManagerTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManagerTests.java @@ -61,7 +61,7 @@ public void testMakeBestStructureGivenXml() throws Exception { public void testMakeBestStructureGivenCsv() throws Exception { assertThat(structureFinderManager.makeBestStructureFinder(explanation, "time,message\n" + "2018-05-17T13:41:23,hello\n", StandardCharsets.UTF_8.name(), randomBoolean()), - instanceOf(SeparatedValuesLogStructureFinder.class)); + instanceOf(DelimitedLogStructureFinder.class)); } public void testMakeBestStructureGivenText() throws Exception { diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTestCase.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTestCase.java index 5f9a87ef2a7f2..6b718fef6c7ea 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTestCase.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTestCase.java @@ -34,14 +34,14 @@ public abstract class LogStructureTestCase extends ESTestCase { "\"level\":\"INFO\",\"pid\":42,\"thread\":\"0x7fff7d2a8000\",\"message\":\"message 2\",\"class\":\"ml\"," + "\"method\":\"core::SomeNoiseMaker\",\"file\":\"Noisemaker.cc\",\"line\":333}\n"; - protected static final String PIPE_SEPARATED_VALUES_SAMPLE = "2018-01-06 16:56:14.295748|INFO |VirtualServer |1 |" + + protected static final String PIPE_DELIMITED_SAMPLE = "2018-01-06 16:56:14.295748|INFO |VirtualServer |1 |" + "listening on 0.0.0.0:9987, :::9987\n" + "2018-01-06 17:19:44.465252|INFO |VirtualServer |1 |client " + "'User1'(id:2) changed default admin channelgroup to 'Guest'(id:8)\n" + "2018-01-06 17:21:25.764368|INFO |VirtualServer |1 |client " + "'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel 'Default Channel'(id:1)"; - protected static final String SEMI_COLON_SEPARATED_VALUES_SAMPLE = "\"pos_id\";\"trip_id\";\"latitude\";\"longitude\";\"altitude\";" + + protected static final String SEMI_COLON_DELIMITED_SAMPLE = "\"pos_id\";\"trip_id\";\"latitude\";\"longitude\";\"altitude\";" + "\"timestamp\"\n" + "\"1\";\"3\";\"4703.7815\";\"1527.4713\";\"359.9\";\"2017-01-19 16:19:04.742113\"\n" 
+ "\"2\";\"3\";\"4703.7815\";\"1527.4714\";\"359.9\";\"2017-01-19 16:19:05.741890\"\n" + diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTests.java index 738928ed28a37..302946dcaa86c 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTests.java @@ -43,14 +43,12 @@ protected LogStructure createTestInstance() { builder.setExcludeLinesPattern(randomAlphaOfLength(100)); } - if (format.isSeparatedValues() || (format.supportsNesting() && randomBoolean())) { + if (format == LogStructure.Format.DELIMITED || (format.supportsNesting() && randomBoolean())) { builder.setInputFields(Arrays.asList(generateRandomStringArray(10, 10, false, false))); } - if (format.isSeparatedValues()) { + if (format == LogStructure.Format.DELIMITED) { builder.setHasHeaderRow(randomBoolean()); - if (rarely()) { - builder.setSeparator(format.separator()); - } + builder.setDelimiter(randomFrom(',', '\t', ';', '|')); } if (format.isSemiStructured()) { builder.setGrokPattern(randomAlphaOfLength(100)); diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactoryTests.java deleted file mode 100644 index 3fd2fb7840ac9..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactoryTests.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -public class PipeSeparatedValuesLogStructureFinderFactoryTests extends LogStructureTestCase { - - private LogStructureFinderFactory factory = new PipeSeparatedValuesLogStructureFinderFactory(); - - // No need to check JSON, XML, CSV, TSV or semi-colon separated values because they come earlier in the order we check formats - - public void testCanCreateFromSampleGivenPipeSeparatedValues() { - - assertTrue(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenText() { - - assertFalse(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactoryTests.java deleted file mode 100644 index 64dad7e078cdf..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactoryTests.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -public class SemiColonSeparatedValuesLogStructureFinderFactoryTests extends LogStructureTestCase { - - private LogStructureFinderFactory factory = new SemiColonSeparatedValuesLogStructureFinderFactory(); - - // No need to check JSON, XML, CSV or TSV because they come earlier in the order we check formats - - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { - - assertTrue(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenPipeSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenText() { - - assertFalse(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactoryTests.java index 267ce375d6e94..c1b30cc749612 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactoryTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactoryTests.java @@ -9,8 +9,8 @@ public class TextLogStructureFinderFactoryTests extends LogStructureTestCase { private LogStructureFinderFactory factory = new TextLogStructureFinderFactory(); - // No need to check JSON, XML, CSV, TSV, semi-colon separated values or pipe - // separated values because they come earlier in the order we check formats + // No need to check JSON, XML, CSV, TSV, semi-colon delimited values or pipe + // delimited values because they come earlier in the order we check formats public void testCanCreateFromSampleGivenText() { diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderTests.java index 7c6a58bb68387..c9e153a82c437 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderTests.java @@ -34,7 +34,7 @@ public void testCreateConfigsGivenElasticsearchLog() throws Exception { } assertNull(structure.getExcludeLinesPattern()); assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertNull(structure.getSeparator()); + assertNull(structure.getDelimiter()); assertNull(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals("\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*", structure.getGrokPattern()); diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactoryTests.java 
b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactoryTests.java deleted file mode 100644 index 1c8acc14d3288..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactoryTests.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -public class TsvLogStructureFinderFactoryTests extends LogStructureTestCase { - - private LogStructureFinderFactory factory = new TsvLogStructureFinderFactory(); - - // No need to check JSON, XML or CSV because they come earlier in the order we check formats - - public void testCanCreateFromSampleGivenTsv() { - - assertTrue(factory.canCreateFromSample(explanation, TSV_SAMPLE)); - } - - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenPipeSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenText() { - - assertFalse(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactoryTests.java index 27eb4ede040b0..b6dc3a56f1dfb 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactoryTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactoryTests.java @@ -26,14 +26,14 @@ public void testCanCreateFromSampleGivenTsv() { assertFalse(factory.canCreateFromSample(explanation, TSV_SAMPLE)); } - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { + public void testCanCreateFromSampleGivenSemiColonDelimited() { - assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); + assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); } - public void testCanCreateFromSampleGivenPipeSeparatedValues() { + public void testCanCreateFromSampleGivenPipeDelimited() { - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); + assertFalse(factory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); } public void testCanCreateFromSampleGivenText() { diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderTests.java index 0d04df152ef00..de653d7bcd0cd 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderTests.java @@ -29,7 +29,7 @@ 
public void testCreateConfigsGivenGoodXml() throws Exception { } assertNull(structure.getExcludeLinesPattern()); assertEquals("^\\s* features = (Map) entityAsMap(response).get("features"); + Map ml = (Map) features.get("ml"); + assertNotNull(ml); + assertTrue((Boolean) ml.get("available")); + assertTrue((Boolean) ml.get("enabled")); + } + + public void testInvalidJob() throws Exception { + // The job name is invalid because it contains a space + String jobId = "invalid job"; + ResponseException e = expectThrows(ResponseException.class, () -> createFarequoteJob(jobId)); + assertTrue(e.getMessage(), e.getMessage().contains("can contain lowercase alphanumeric (a-z and 0-9), hyphens or underscores")); + // If validation of the invalid job is not done until after transportation to the master node then the + // root cause gets reported as a remote_transport_exception. The code in PubJobAction is supposed to + // validate before transportation to avoid this. This test must be done in a multi-node cluster to have + // a chance of catching a problem, hence it is here rather than in the single node integration tests. + assertFalse(e.getMessage(), e.getMessage().contains("remote_transport_exception")); + } + + public void testMiniFarequote() throws Exception { + String jobId = "mini-farequote-job"; + createFarequoteJob(jobId); + + Response openResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open")); + assertEquals(Collections.singletonMap("opened", true), entityAsMap(openResponse)); + + Request addData = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data"); + addData.setEntity(new NStringEntity( + "{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + + "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}", + randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); + Response addDataResponse = client().performRequest(addData); + assertEquals(202, addDataResponse.getStatusLine().getStatusCode()); + Map responseBody = entityAsMap(addDataResponse); + assertEquals(2, responseBody.get("processed_record_count")); + assertEquals(4, responseBody.get("processed_field_count")); + assertEquals(177, responseBody.get("input_bytes")); + assertEquals(6, responseBody.get("input_field_count")); + assertEquals(0, responseBody.get("invalid_date_count")); + assertEquals(0, responseBody.get("missing_field_count")); + assertEquals(0, responseBody.get("out_of_order_timestamp_count")); + assertEquals(0, responseBody.get("bucket_count")); + assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); + assertEquals(1403481700000L, responseBody.get("latest_record_timestamp")); + + Response flushResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush")); + assertFlushResponse(flushResponse, true, 1403481600000L); + + Request closeRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close"); + closeRequest.addParameter("timeout", "20s"); + Response closeResponse = client().performRequest(closeRequest); + assertEquals(Collections.singletonMap("closed", true), entityAsMap(closeResponse)); + + Response statsResponse = client().performRequest( + new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + Map dataCountsDoc = (Map) + 
((Map)((List) entityAsMap(statsResponse).get("jobs")).get(0)).get("data_counts"); + assertEquals(2, dataCountsDoc.get("processed_record_count")); + assertEquals(4, dataCountsDoc.get("processed_field_count")); + assertEquals(177, dataCountsDoc.get("input_bytes")); + assertEquals(6, dataCountsDoc.get("input_field_count")); + assertEquals(0, dataCountsDoc.get("invalid_date_count")); + assertEquals(0, dataCountsDoc.get("missing_field_count")); + assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count")); + assertEquals(0, dataCountsDoc.get("bucket_count")); + assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp")); + assertEquals(1403481700000L, dataCountsDoc.get("latest_record_timestamp")); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + } + + public void testMiniFarequoteWithDatafeeder() throws Exception { + Request createAirlineDataRequest = new Request("PUT", "/airline-data"); + createAirlineDataRequest.setJsonEntity("{" + + " \"mappings\": {" + + " \"response\": {" + + " \"properties\": {" + + " \"time\": { \"type\":\"date\"}," + + " \"airline\": { \"type\":\"keyword\"}," + + " \"responsetime\": { \"type\":\"float\"}" + + " }" + + " }" + + " }" + + "}"); + client().performRequest(createAirlineDataRequest); + Request airlineData1 = new Request("PUT", "/airline-data/response/1"); + airlineData1.setJsonEntity("{\"time\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}"); + client().performRequest(airlineData1); + Request airlineData2 = new Request("PUT", "/airline-data/response/2"); + airlineData2.setJsonEntity("{\"time\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}"); + client().performRequest(airlineData2); + + // Ensure all data is searchable + client().performRequest(new Request("POST", "/_refresh")); + + String jobId = "mini-farequote-with-data-feeder-job"; + createFarequoteJob(jobId); + String datafeedId = "bar"; + createDatafeed(datafeedId, jobId); + + Response openResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open")); + assertEquals(Collections.singletonMap("opened", true), entityAsMap(openResponse)); + + Request startRequest = new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start"); + startRequest.addParameter("start", "0"); + Response startResponse = client().performRequest(startRequest); + assertEquals(Collections.singletonMap("started", true), entityAsMap(startResponse)); + + assertBusy(() -> { + try { + Response statsResponse = client().performRequest( + new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + Map dataCountsDoc = (Map) + ((Map)((List) entityAsMap(statsResponse).get("jobs")).get(0)).get("data_counts"); + assertEquals(2, dataCountsDoc.get("input_record_count")); + assertEquals(2, dataCountsDoc.get("processed_record_count")); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + Response stopResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stop")); + assertEquals(Collections.singletonMap("stopped", true), entityAsMap(stopResponse)); + + Request closeRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close"); + closeRequest.addParameter("timeout", "20s"); + assertEquals(Collections.singletonMap("closed", true), + 
entityAsMap(client().performRequest(closeRequest))); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId)); + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + } + + public void testMiniFarequoteReopen() throws Exception { + String jobId = "mini-farequote-reopen"; + createFarequoteJob(jobId); + + Response openResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open")); + assertEquals(Collections.singletonMap("opened", true), entityAsMap(openResponse)); + + Request addDataRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data"); + addDataRequest.setEntity(new NStringEntity( + "{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + + "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}\n" + + "{\"airline\":\"JBU\",\"responsetime\":\"877.5927\",\"sourcetype\":\"farequote\",\"time\":\"1403481800\"}\n" + + "{\"airline\":\"KLM\",\"responsetime\":\"1355.4812\",\"sourcetype\":\"farequote\",\"time\":\"1403481900\"}\n" + + "{\"airline\":\"NKS\",\"responsetime\":\"9991.3981\",\"sourcetype\":\"farequote\",\"time\":\"1403482000\"}", + randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); + Response addDataResponse = client().performRequest(addDataRequest); + assertEquals(202, addDataResponse.getStatusLine().getStatusCode()); + Map responseBody = entityAsMap(addDataResponse); + assertEquals(5, responseBody.get("processed_record_count")); + assertEquals(10, responseBody.get("processed_field_count")); + assertEquals(446, responseBody.get("input_bytes")); + assertEquals(15, responseBody.get("input_field_count")); + assertEquals(0, responseBody.get("invalid_date_count")); + assertEquals(0, responseBody.get("missing_field_count")); + assertEquals(0, responseBody.get("out_of_order_timestamp_count")); + assertEquals(0, responseBody.get("bucket_count")); + assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); + assertEquals(1403482000000L, responseBody.get("latest_record_timestamp")); + + Response flushResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush")); + assertFlushResponse(flushResponse, true, 1403481600000L); + + Request closeRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close"); + closeRequest.addParameter("timeout", "20s"); + assertEquals(Collections.singletonMap("closed", true), + entityAsMap(client().performRequest(closeRequest))); + + Request statsRequest = new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + client().performRequest(statsRequest); + + Request openRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); + openRequest.addParameter("timeout", "20s"); + Response openResponse2 = client().performRequest(openRequest); + assertEquals(Collections.singletonMap("opened", true), entityAsMap(openResponse2)); + + // feed some more data points + Request addDataRequest2 = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data"); + addDataRequest2.setEntity(new NStringEntity( + "{\"airline\":\"AAL\",\"responsetime\":\"136.2361\",\"sourcetype\":\"farequote\",\"time\":\"1407081600\"}\n" + + 
"{\"airline\":\"VRD\",\"responsetime\":\"282.9847\",\"sourcetype\":\"farequote\",\"time\":\"1407081700\"}\n" + + "{\"airline\":\"JAL\",\"responsetime\":\"493.0338\",\"sourcetype\":\"farequote\",\"time\":\"1407081800\"}\n" + + "{\"airline\":\"UAL\",\"responsetime\":\"8.4275\",\"sourcetype\":\"farequote\",\"time\":\"1407081900\"}\n" + + "{\"airline\":\"FFT\",\"responsetime\":\"221.8693\",\"sourcetype\":\"farequote\",\"time\":\"1407082000\"}", + randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); + Response addDataResponse2 = client().performRequest(addDataRequest2); + assertEquals(202, addDataResponse2.getStatusLine().getStatusCode()); + Map responseBody2 = entityAsMap(addDataResponse2); + assertEquals(5, responseBody2.get("processed_record_count")); + assertEquals(10, responseBody2.get("processed_field_count")); + assertEquals(442, responseBody2.get("input_bytes")); + assertEquals(15, responseBody2.get("input_field_count")); + assertEquals(0, responseBody2.get("invalid_date_count")); + assertEquals(0, responseBody2.get("missing_field_count")); + assertEquals(0, responseBody2.get("out_of_order_timestamp_count")); + assertEquals(1000, responseBody2.get("bucket_count")); + + // unintuitive: should return the earliest record timestamp of this feed??? + assertEquals(null, responseBody2.get("earliest_record_timestamp")); + assertEquals(1407082000000L, responseBody2.get("latest_record_timestamp")); + + assertEquals(Collections.singletonMap("closed", true), + entityAsMap(client().performRequest(closeRequest))); + + // counts should be summed up + Response statsResponse = client().performRequest(statsRequest); + + Map dataCountsDoc = (Map) + ((Map)((List) entityAsMap(statsResponse).get("jobs")).get(0)).get("data_counts"); + assertEquals(10, dataCountsDoc.get("processed_record_count")); + assertEquals(20, dataCountsDoc.get("processed_field_count")); + assertEquals(888, dataCountsDoc.get("input_bytes")); + assertEquals(30, dataCountsDoc.get("input_field_count")); + assertEquals(0, dataCountsDoc.get("invalid_date_count")); + assertEquals(0, dataCountsDoc.get("missing_field_count")); + assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count")); + assertEquals(1000, dataCountsDoc.get("bucket_count")); + assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp")); + assertEquals(1407082000000L, dataCountsDoc.get("latest_record_timestamp")); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + } + + private Response createDatafeed(String datafeedId, String jobId) throws Exception { + XContentBuilder xContentBuilder = jsonBuilder(); + xContentBuilder.startObject(); + xContentBuilder.field("job_id", jobId); + xContentBuilder.array("indexes", "airline-data"); + xContentBuilder.array("types", "response"); + xContentBuilder.field("_source", true); + xContentBuilder.endObject(); + Request request = new Request("PUT", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); + request.setJsonEntity(Strings.toString(xContentBuilder)); + return client().performRequest(request); + } + + private Response createFarequoteJob(String jobId) throws Exception { + XContentBuilder xContentBuilder = jsonBuilder(); + xContentBuilder.startObject(); + { + xContentBuilder.field("job_id", jobId); + xContentBuilder.field("description", "Analysis of response time by airline"); + + xContentBuilder.startObject("analysis_config"); + { + xContentBuilder.field("bucket_span", "3600s"); + 
xContentBuilder.startArray("detectors"); + { + xContentBuilder.startObject(); + { + xContentBuilder.field("function", "metric"); + xContentBuilder.field("field_name", "responsetime"); + xContentBuilder.field("by_field_name", "airline"); + } + xContentBuilder.endObject(); + } + xContentBuilder.endArray(); + } + xContentBuilder.endObject(); + + xContentBuilder.startObject("data_description"); + { + xContentBuilder.field("format", "xcontent"); + xContentBuilder.field("time_field", "time"); + xContentBuilder.field("time_format", "epoch"); + } + xContentBuilder.endObject(); + } + xContentBuilder.endObject(); + + Request request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + URLEncoder.encode(jobId, "UTF-8")); + request.setJsonEntity(Strings.toString(xContentBuilder)); + return client().performRequest(request); + } + + private static void assertFlushResponse(Response response, boolean expectedFlushed, long expectedLastFinalizedBucketEnd) + throws IOException { + Map asMap = entityAsMap(response); + assertThat(asMap.size(), equalTo(2)); + assertThat(asMap.get("flushed"), is(expectedFlushed)); + assertThat(asMap.get("last_finalized_bucket_end"), equalTo(expectedLastFinalizedBucketEnd)); + } +} diff --git a/x-pack/plugin/ml/qa/build.gradle b/x-pack/plugin/ml/qa/build.gradle new file mode 100644 index 0000000000000..517c93cc17862 --- /dev/null +++ b/x-pack/plugin/ml/qa/build.gradle @@ -0,0 +1,31 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +subprojects { + // HACK: please fix this + // we want to add the rest api specs for xpack to qa tests, but we + // need to wait until after the project is evaluated to only apply + // to those that have rest tests. this used to be done automatically + // when xpack was a plugin, but now there is no place with xpack as a module. + // instead, we should package these and make them easy to use for rest tests, + // but currently, they must be copied into the resources of the test runner. 
+ project.tasks.withType(RestIntegTestTask) { + File xpackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources') + project.copyRestSpec.from(xpackResources) { + include 'rest-api-spec/api/**' + } + } +} + +gradle.projectsEvaluated { + subprojects { + Task assemble = project.tasks.findByName('assemble') + if (assemble) { + project.tasks.remove(assemble) + project.build.dependsOn.remove('assemble') + } + Task dependenciesInfo = project.tasks.findByName('dependenciesInfo') + if (dependenciesInfo) { + project.precommit.dependsOn.remove('dependenciesInfo') + } + } +} diff --git a/x-pack/qa/ml-disabled/build.gradle b/x-pack/plugin/ml/qa/disabled/build.gradle similarity index 80% rename from x-pack/qa/ml-disabled/build.gradle rename to x-pack/plugin/ml/qa/disabled/build.gradle index e914def3507cd..a24036651d504 100644 --- a/x-pack/qa/ml-disabled/build.gradle +++ b/x-pack/plugin/ml/qa/disabled/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('ml'), configuration: 'runtime') } diff --git a/x-pack/plugin/ml/qa/disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java b/x-pack/plugin/ml/qa/disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java new file mode 100644 index 0000000000000..170b4f14486bc --- /dev/null +++ b/x-pack/plugin/ml/qa/disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.ml.MachineLearning; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; + +public class MlPluginDisabledIT extends ESRestTestCase { + + /** + * Check that when the ml plugin is disabled, you cannot create a job as the + * rest handler is not registered + */ + public void testActionsFail() throws Exception { + XContentBuilder xContentBuilder = jsonBuilder(); + xContentBuilder.startObject(); + { + xContentBuilder.field("actions-fail-job", "foo"); + xContentBuilder.field("description", "Analysis of response time by airline"); + + xContentBuilder.startObject("analysis_config"); + { + xContentBuilder.field("bucket_span", "3600s"); + xContentBuilder.startArray("detectors"); + { + xContentBuilder.startObject(); + { + xContentBuilder.field("function", "metric"); + xContentBuilder.field("field_name", "responsetime"); + xContentBuilder.field("by_field_name", "airline"); + } + xContentBuilder.endObject(); + } + xContentBuilder.endArray(); + } + xContentBuilder.endObject(); + + xContentBuilder.startObject("data_description"); + { + xContentBuilder.field("format", "xcontent"); + xContentBuilder.field("time_field", "time"); + xContentBuilder.field("time_format", "epoch"); + } + xContentBuilder.endObject(); + } + xContentBuilder.endObject(); + + Request request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/foo"); + request.setJsonEntity(Strings.toString(xContentBuilder)); + ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(exception.getMessage(), containsString("no handler found for uri [/_xpack/ml/anomaly_detectors/foo] and method [PUT]")); + } +} diff --git a/x-pack/qa/smoke-test-ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle similarity index 98% rename from x-pack/qa/smoke-test-ml-with-security/build.gradle rename to x-pack/plugin/ml/qa/ml-with-security/build.gradle index 84c23add25411..a702973fcb02d 100644 --- a/x-pack/qa/smoke-test-ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -2,7 +2,8 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') } diff --git a/x-pack/qa/smoke-test-ml-with-security/roles.yml b/x-pack/plugin/ml/qa/ml-with-security/roles.yml similarity index 100% rename from x-pack/qa/smoke-test-ml-with-security/roles.yml rename to x-pack/plugin/ml/qa/ml-with-security/roles.yml diff --git a/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java similarity index 100% rename from 
x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java rename to x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java diff --git a/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java similarity index 100% rename from x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java rename to x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java diff --git a/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java similarity index 100% rename from x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java rename to x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/build.gradle b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle similarity index 95% rename from x-pack/qa/ml-native-multi-node-tests/build.gradle rename to x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle index b1893b20c465b..0c4304b123ea9 100644 --- a/x-pack/qa/ml-native-multi-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle @@ -4,7 +4,8 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('ml'), configuration: 'runtime') testCompile project(path: xpackModule('ml'), configuration: 'testArtifacts') diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java similarity index 92% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java index 80afdeff82ad8..cc5a9f4f1b469 100644 --- a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java +++ 
b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java @@ -7,6 +7,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; +import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; @@ -36,7 +37,11 @@ public void testDefaultRenormalization() throws Exception { String jobId = "basic-renormalization-it-test-default-renormalization-job"; createAndRunJob(jobId, null); - List records = getRecords(jobId); + GetRecordsAction.Request getRecordsRequest = new GetRecordsAction.Request(jobId); + // Setting the record score to 10.0, to avoid the low score records due to multibucket trailing effect + getRecordsRequest.setRecordScore(10.0); + + List records = getRecords(getRecordsRequest); assertThat(records.size(), equalTo(2)); AnomalyRecord laterRecord = records.get(0); assertThat(laterRecord.getActual().get(0), equalTo(100.0)); diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java similarity index 56% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java index 54d8090a7a421..7a93ecdd9e1cd 100644 --- a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.ml.integration; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -22,10 +22,7 @@ import org.junit.After; import org.junit.Before; 
-import java.io.BufferedReader; import java.io.IOException; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; import java.util.Date; @@ -36,6 +33,7 @@ import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; public class DatafeedJobsRestIT extends ESRestTestCase { @@ -57,26 +55,24 @@ protected boolean preserveTemplatesUponCompletion() { } private void setupDataAccessRole(String index) throws IOException { - String json = "{" + Request request = new Request("PUT", "/_xpack/security/role/test_data_access"); + request.setJsonEntity("{" + " \"indices\" : [" + " { \"names\": [\"" + index + "\"], \"privileges\": [\"read\"] }" + " ]" - + "}"; - - client().performRequest("put", "_xpack/security/role/test_data_access", Collections.emptyMap(), - new StringEntity(json, ContentType.APPLICATION_JSON)); + + "}"); + client().performRequest(request); } private void setupUser(String user, List roles) throws IOException { String password = new String(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING.getChars()); - String json = "{" + Request request = new Request("PUT", "/_xpack/security/user/" + user); + request.setJsonEntity("{" + " \"password\" : \"" + password + "\"," + " \"roles\" : [ " + roles.stream().map(unquoted -> "\"" + unquoted + "\"").collect(Collectors.joining(", ")) + " ]" - + "}"; - - client().performRequest("put", "_xpack/security/user/" + user, Collections.emptyMap(), - new StringEntity(json, ContentType.APPLICATION_JSON)); + + "}"); + client().performRequest(request); } @Before @@ -92,7 +88,10 @@ public void setUpData() throws Exception { } private void addAirlineData() throws IOException { - String mappings = "{" + StringBuilder bulk = new StringBuilder(); + + Request createEmptyAirlineDataRequest = new Request("PUT", "/airline-data-empty"); + createEmptyAirlineDataRequest.setJsonEntity("{" + " \"mappings\": {" + " \"response\": {" + " \"properties\": {" @@ -102,12 +101,12 @@ private void addAirlineData() throws IOException { + " }" + " }" + " }" - + "}"; - client().performRequest("put", "airline-data-empty", Collections.emptyMap(), - new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + "}"); + client().performRequest(createEmptyAirlineDataRequest); // Create index with source = enabled, doc_values = enabled, stored = false + multi-field - mappings = "{" + Request createAirlineDataRequest = new Request("PUT", "/airline-data"); + createAirlineDataRequest.setJsonEntity("{" + " \"mappings\": {" + " \"response\": {" + " \"properties\": {" @@ -123,18 +122,17 @@ private void addAirlineData() throws IOException { + " }" + " }" + " }" - + "}"; - client().performRequest("put", "airline-data", Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + "}"); + client().performRequest(createAirlineDataRequest); - client().performRequest("put", "airline-data/response/1", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data/response/2", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}", - ContentType.APPLICATION_JSON)); + bulk.append("{\"index\": 
{\"_index\": \"airline-data\", \"_type\": \"response\", \"_id\": 1}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data\", \"_type\": \"response\", \"_id\": 2}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}\n"); // Create index with source = enabled, doc_values = disabled (except time), stored = false - mappings = "{" + Request createAirlineDataDisabledDocValues = new Request("PUT", "/airline-data-disabled-doc-values"); + createAirlineDataDisabledDocValues.setJsonEntity("{" + " \"mappings\": {" + " \"response\": {" + " \"properties\": {" @@ -144,19 +142,17 @@ private void addAirlineData() throws IOException { + " }" + " }" + " }" - + "}"; - client().performRequest("put", "airline-data-disabled-doc-values", Collections.emptyMap(), - new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + "}"); + client().performRequest(createAirlineDataDisabledDocValues); - client().performRequest("put", "airline-data-disabled-doc-values/response/1", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-disabled-doc-values/response/2", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}", - ContentType.APPLICATION_JSON)); + bulk.append("{\"index\": {\"_index\": \"airline-data-disabled-doc-values\", \"_type\": \"response\", \"_id\": 1}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-disabled-doc-values\", \"_type\": \"response\", \"_id\": 2}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}\n"); // Create index with source = disabled, doc_values = enabled (except time), stored = true - mappings = "{" + Request createAirlineDataDisabledSource = new Request("PUT", "/airline-data-disabled-source"); + createAirlineDataDisabledSource.setJsonEntity("{" + " \"mappings\": {" + " \"response\": {" + " \"_source\":{\"enabled\":false}," @@ -167,19 +163,16 @@ private void addAirlineData() throws IOException { + " }" + " }" + " }" - + "}"; - client().performRequest("put", "airline-data-disabled-source", Collections.emptyMap(), - new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + "}"); - client().performRequest("put", "airline-data-disabled-source/response/1", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-disabled-source/response/2", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}", - ContentType.APPLICATION_JSON)); + bulk.append("{\"index\": {\"_index\": \"airline-data-disabled-source\", \"_type\": \"response\", \"_id\": 1}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-disabled-source\", \"_type\": \"response\", \"_id\": 2}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}\n"); // Create index with nested 
documents - mappings = "{" + Request createAirlineDataNested = new Request("PUT", "/nested-data"); + createAirlineDataNested.setJsonEntity("{" + " \"mappings\": {" + " \"response\": {" + " \"properties\": {" @@ -187,18 +180,17 @@ private void addAirlineData() throws IOException { + " }" + " }" + " }" - + "}"; - client().performRequest("put", "nested-data", Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + "}"); + client().performRequest(createAirlineDataNested); - client().performRequest("put", "nested-data/response/1", Collections.emptyMap(), - new StringEntity("{\"time\":\"2016-06-01T00:00:00Z\", \"responsetime\":{\"millis\":135.22}}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "nested-data/response/2", Collections.emptyMap(), - new StringEntity("{\"time\":\"2016-06-01T01:59:00Z\",\"responsetime\":{\"millis\":222.0}}", - ContentType.APPLICATION_JSON)); + bulk.append("{\"index\": {\"_index\": \"nested-data\", \"_type\": \"response\", \"_id\": 1}}\n"); + bulk.append("{\"time\":\"2016-06-01T00:00:00Z\", \"responsetime\":{\"millis\":135.22}}\n"); + bulk.append("{\"index\": {\"_index\": \"nested-data\", \"_type\": \"response\", \"_id\": 2}}\n"); + bulk.append("{\"time\":\"2016-06-01T01:59:00Z\",\"responsetime\":{\"millis\":222.0}}\n"); // Create index with multiple docs per time interval for aggregation testing - mappings = "{" + Request createAirlineDataAggs = new Request("PUT", "/airline-data-aggs"); + createAirlineDataAggs.setJsonEntity("{" + " \"mappings\": {" + " \"response\": {" + " \"properties\": {" @@ -208,43 +200,33 @@ private void addAirlineData() throws IOException { + " }" + " }" + " }" - + "}"; - client().performRequest("put", "airline-data-aggs", Collections.emptyMap(), - new StringEntity(mappings, ContentType.APPLICATION_JSON)); - - client().performRequest("put", "airline-data-aggs/response/1", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":100.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-aggs/response/2", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:01:00Z\",\"airline\":\"AAA\",\"responsetime\":200.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-aggs/response/3", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"BBB\",\"responsetime\":1000.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-aggs/response/4", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T00:01:00Z\",\"airline\":\"BBB\",\"responsetime\":2000.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-aggs/response/5", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T01:00:00Z\",\"airline\":\"AAA\",\"responsetime\":300.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-aggs/response/6", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T01:01:00Z\",\"airline\":\"AAA\",\"responsetime\":400.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-aggs/response/7", Collections.emptyMap(), - new StringEntity("{\"time stamp\":\"2016-06-01T01:00:00Z\",\"airline\":\"BBB\",\"responsetime\":3000.0}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data-aggs/response/8", Collections.emptyMap(), - new 
StringEntity("{\"time stamp\":\"2016-06-01T01:01:00Z\",\"airline\":\"BBB\",\"responsetime\":4000.0}", - ContentType.APPLICATION_JSON)); - - // Ensure all data is searchable - client().performRequest("post", "_refresh"); + + "}"); + client().performRequest(createAirlineDataAggs); + + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 1}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":100.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 2}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:01:00Z\",\"airline\":\"AAA\",\"responsetime\":200.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 3}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"BBB\",\"responsetime\":1000.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 4}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T00:01:00Z\",\"airline\":\"BBB\",\"responsetime\":2000.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 5}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:00:00Z\",\"airline\":\"AAA\",\"responsetime\":300.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 6}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:01:00Z\",\"airline\":\"AAA\",\"responsetime\":400.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 7}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:00:00Z\",\"airline\":\"BBB\",\"responsetime\":3000.0}\n"); + bulk.append("{\"index\": {\"_index\": \"airline-data-aggs\", \"_type\": \"response\", \"_id\": 8}}\n"); + bulk.append("{\"time stamp\":\"2016-06-01T01:01:00Z\",\"airline\":\"BBB\",\"responsetime\":4000.0}\n"); + + bulkIndex(bulk.toString()); } private void addNetworkData(String index) throws IOException { - // Create index with source = enabled, doc_values = enabled, stored = false + multi-field - String mappings = "{" + Request createIndexRequest = new Request("PUT", index); + createIndexRequest.setJsonEntity("{" + " \"mappings\": {" + " \"doc\": {" + " \"properties\": {" @@ -260,27 +242,25 @@ private void addNetworkData(String index) throws IOException { + " }" + " }" + " }" - + "}"; - client().performRequest("put", index, Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + "}");; + client().performRequest(createIndexRequest); + StringBuilder bulk = new StringBuilder(); String docTemplate = "{\"timestamp\":%d,\"host\":\"%s\",\"network_bytes_out\":%d}"; Date date = new Date(1464739200735L); - for (int i=0; i<120; i++) { + for (int i = 0; i < 120; i++) { long byteCount = randomNonNegativeLong(); - String jsonDoc = String.format(Locale.ROOT, docTemplate, date.getTime(), "hostA", byteCount); - client().performRequest("post", index + "/doc", Collections.emptyMap(), - new StringEntity(jsonDoc, ContentType.APPLICATION_JSON)); + bulk.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"doc\"}}\n"); + bulk.append(String.format(Locale.ROOT, docTemplate, date.getTime(), "hostA", byteCount)).append('\n'); byteCount = randomNonNegativeLong(); - jsonDoc = String.format(Locale.ROOT, docTemplate, date.getTime(), "hostB", byteCount); - client().performRequest("post", index + "/doc", Collections.emptyMap(), - new 
StringEntity(jsonDoc, ContentType.APPLICATION_JSON)); + bulk.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"doc\"}}\n"); + bulk.append(String.format(Locale.ROOT, docTemplate, date.getTime(), "hostB", byteCount)).append('\n'); date = new Date(date.getTime() + 10_000); } - // Ensure all data is searchable - client().performRequest("post", "_refresh"); + bulkIndex(bulk.toString()); } public void testLookbackOnlyWithMixedTypes() throws Exception { @@ -314,11 +294,21 @@ public void testLookbackOnlyWithScriptFields() throws Exception { public void testLookbackOnlyWithNestedFields() throws Exception { String jobId = "test-lookback-only-with-nested-fields"; - String job = "{\"description\":\"Nested job\", \"analysis_config\" : {\"bucket_span\":\"1h\",\"detectors\" :" - + "[{\"function\":\"mean\",\"field_name\":\"responsetime.millis\"}]}, \"data_description\" : {\"time_field\":\"time\"}" - + "}"; - client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), - new StringEntity(job, ContentType.APPLICATION_JSON)); + Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJobRequest.setJsonEntity("{\n" + + " \"description\": \"Nested job\",\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"1h\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"responsetime.millis\"\n" + + " }\n" + + " ]\n" + + " }," + + " \"data_description\": {\"time_field\": \"time\"}\n" + + "}"); + client().performRequest(createJobRequest); String datafeedId = jobId + "-datafeed"; new DatafeedBuilder(datafeedId, jobId, "nested-data", "response").build(); @@ -326,8 +316,9 @@ public void testLookbackOnlyWithNestedFields() throws Exception { startDatafeedAndWaitUntilStopped(datafeedId); waitUntilJobIsClosed(jobId); - Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + Response jobStatsResponse = client().performRequest( + new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":2")); assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":2")); assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":0")); @@ -340,14 +331,23 @@ public void testLookbackOnlyGivenEmptyIndex() throws Exception { public void testInsufficientSearchPrivilegesOnPut() throws Exception { String jobId = "privs-put-job"; - String job = "{\"description\":\"Aggs job\",\"analysis_config\" :{\"bucket_span\":\"1h\"," - + "\"summary_count_field_name\":\"doc_count\"," - + "\"detectors\":[{\"function\":\"mean\"," - + "\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]}," - + "\"data_description\" : {\"time_field\":\"time stamp\"}" - + "}"; - client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, - Collections.emptyMap(), new StringEntity(job, ContentType.APPLICATION_JSON)); + Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJobRequest.setJsonEntity("{\n" + + " \"description\": \"Aggs job\",\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"1h\",\n " + + " 
\"summary_count_field_name\": \"doc_count\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"responsetime\",\n" + + " \"by_field_name\":\"airline\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\" : {\"time_field\": \"time stamp\"}\n" + + "}"); + client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; // This should be disallowed, because even though the ml_admin user has permission to @@ -365,14 +365,23 @@ public void testInsufficientSearchPrivilegesOnPut() throws Exception { public void testInsufficientSearchPrivilegesOnPreview() throws Exception { String jobId = "privs-preview-job"; - String job = "{\"description\":\"Aggs job\",\"analysis_config\" :{\"bucket_span\":\"1h\"," - + "\"summary_count_field_name\":\"doc_count\"," - + "\"detectors\":[{\"function\":\"mean\"," - + "\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]}," - + "\"data_description\" : {\"time_field\":\"time stamp\"}" - + "}"; - client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, - Collections.emptyMap(), new StringEntity(job, ContentType.APPLICATION_JSON)); + Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJobRequest.setJsonEntity("{\n" + + " \"description\": \"Aggs job\",\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"1h\",\n" + + " \"summary_count_field_name\": \"doc_count\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"responsetime\",\n" + + " \"by_field_name\": \"airline\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\" : {\"time_field\": \"time stamp\"}\n" + + "}"); + client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs", "response").build(); @@ -380,10 +389,11 @@ public void testInsufficientSearchPrivilegesOnPreview() throws Exception { // This should be disallowed, because ml_admin is trying to preview a datafeed created by // by another user (x_pack_rest_user in this case) that will reveal the content of an index they // don't have permission to search directly - ResponseException e = expectThrows(ResponseException.class, () -> - client().performRequest("get", - MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_preview", - new BasicHeader("Authorization", BASIC_AUTH_VALUE_ML_ADMIN))); + Request getFeed = new Request("GET", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_preview"); + RequestOptions.Builder options = getFeed.getOptions().toBuilder(); + options.addHeader("Authorization", BASIC_AUTH_VALUE_ML_ADMIN); + getFeed.setOptions(options); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(getFeed)); assertThat(e.getMessage(), containsString("[indices:data/read/field_caps] is unauthorized for user [ml_admin]")); @@ -391,13 +401,23 @@ public void testInsufficientSearchPrivilegesOnPreview() throws Exception { public void testLookbackOnlyGivenAggregationsWithHistogram() throws Exception { String jobId = "aggs-histogram-job"; - String job = "{\"description\":\"Aggs job\",\"analysis_config\" :{\"bucket_span\":\"1h\"," - + "\"summary_count_field_name\":\"doc_count\"," - + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]}," - + "\"data_description\" : {\"time_field\":\"time stamp\"}" - + "}"; - client().performRequest("put", 
MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), - new StringEntity(job, ContentType.APPLICATION_JSON)); + Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJobRequest.setJsonEntity("{\n" + + " \"description\": \"Aggs job\",\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"1h\",\n" + + " \"summary_count_field_name\": \"doc_count\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"responsetime\",\n" + + " \"by_field_name\": \"airline\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\"time_field\": \"time stamp\"}\n" + + "}"); + client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; String aggregations = "{\"buckets\":{\"histogram\":{\"field\":\"time stamp\",\"interval\":3600000}," @@ -410,8 +430,9 @@ public void testLookbackOnlyGivenAggregationsWithHistogram() throws Exception { startDatafeedAndWaitUntilStopped(datafeedId); waitUntilJobIsClosed(jobId); - Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + Response jobStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":4")); assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":4")); assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":0")); @@ -419,13 +440,23 @@ public void testLookbackOnlyGivenAggregationsWithHistogram() throws Exception { public void testLookbackOnlyGivenAggregationsWithDateHistogram() throws Exception { String jobId = "aggs-date-histogram-job"; - String job = "{\"description\":\"Aggs job\",\"analysis_config\" :{\"bucket_span\":\"3600s\"," - + "\"summary_count_field_name\":\"doc_count\"," - + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]}," - + "\"data_description\" : {\"time_field\":\"time stamp\"}" - + "}"; - client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), - new StringEntity(job, ContentType.APPLICATION_JSON)); + Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJobRequest.setJsonEntity("{\n" + + " \"description\": \"Aggs job\",\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"3600s\",\n" + + " \"summary_count_field_name\": \"doc_count\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"responsetime\",\n" + + " \"by_field_name\": \"airline\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\"time_field\": \"time stamp\"}\n" + + "}"); + client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; String aggregations = "{\"time stamp\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":\"1h\"}," @@ -438,8 +469,9 @@ public void testLookbackOnlyGivenAggregationsWithDateHistogram() throws Exceptio startDatafeedAndWaitUntilStopped(datafeedId); waitUntilJobIsClosed(jobId); - Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + 
"/_stats"); - String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + Response jobStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":4")); assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":4")); assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":0")); @@ -447,13 +479,22 @@ public void testLookbackOnlyGivenAggregationsWithDateHistogram() throws Exceptio public void testLookbackUsingDerivativeAggWithLargerHistogramBucketThanDataRate() throws Exception { String jobId = "derivative-agg-network-job"; - String job = "{\"analysis_config\" :{\"bucket_span\":\"300s\"," - + "\"summary_count_field_name\":\"doc_count\"," - + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"bytes-delta\",\"by_field_name\":\"hostname\"}]}," - + "\"data_description\" : {\"time_field\":\"timestamp\"}" - + "}"; - client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), - new StringEntity(job, ContentType.APPLICATION_JSON)); + Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJobRequest.setJsonEntity("{\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"300s\",\n" + + " \"summary_count_field_name\": \"doc_count\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"bytes-delta\",\n" + + " \"by_field_name\": \"hostname\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\"time_field\": \"timestamp\"}\n" + + "}"); + client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; String aggregations = @@ -471,8 +512,9 @@ public void testLookbackUsingDerivativeAggWithLargerHistogramBucketThanDataRate( startDatafeedAndWaitUntilStopped(datafeedId); waitUntilJobIsClosed(jobId); - Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + Response jobStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":40")); assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":40")); assertThat(jobStatsResponseAsString, containsString("\"out_of_order_timestamp_count\":0")); @@ -483,13 +525,22 @@ public void testLookbackUsingDerivativeAggWithLargerHistogramBucketThanDataRate( public void testLookbackUsingDerivativeAggWithSmallerHistogramBucketThanDataRate() throws Exception { String jobId = "derivative-agg-network-job"; - String job = "{\"analysis_config\" :{\"bucket_span\":\"300s\"," - + "\"summary_count_field_name\":\"doc_count\"," - + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"bytes-delta\",\"by_field_name\":\"hostname\"}]}," - + "\"data_description\" : {\"time_field\":\"timestamp\"}" - + "}"; - client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), - new StringEntity(job, ContentType.APPLICATION_JSON)); + Request createJobRequest = 
new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJobRequest.setJsonEntity("{\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"300s\",\n" + + " \"summary_count_field_name\": \"doc_count\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"bytes-delta\",\n" + + " \"by_field_name\": \"hostname\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\"time_field\": \"timestamp\"}\n" + + "}"); + client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; String aggregations = @@ -507,21 +558,31 @@ public void testLookbackUsingDerivativeAggWithSmallerHistogramBucketThanDataRate startDatafeedAndWaitUntilStopped(datafeedId); waitUntilJobIsClosed(jobId); - Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + Response jobStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":240")); assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":240")); } public void testLookbackWithoutPermissions() throws Exception { String jobId = "permission-test-network-job"; - String job = "{\"analysis_config\" :{\"bucket_span\":\"300s\"," - + "\"summary_count_field_name\":\"doc_count\"," - + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"bytes-delta\",\"by_field_name\":\"hostname\"}]}," - + "\"data_description\" : {\"time_field\":\"timestamp\"}" - + "}"; - client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), - new StringEntity(job, ContentType.APPLICATION_JSON)); + Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJobRequest.setJsonEntity("{\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"300s\",\n" + + " \"summary_count_field_name\": \"doc_count\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"bytes-delta\",\n" + + " \"by_field_name\": \"hostname\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\"time_field\": \"timestamp\"}\n" + + "}"); + client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; String aggregations = @@ -545,29 +606,39 @@ public void testLookbackWithoutPermissions() throws Exception { startDatafeedAndWaitUntilStopped(datafeedId, BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS); waitUntilJobIsClosed(jobId); - Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + Response jobStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); // We expect that no data made it through to the job assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":0")); assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":0")); // There should be a notification saying that there was a 
problem extracting data - client().performRequest("post", "_refresh"); - Response notificationsResponse = client().performRequest("get", AuditorField.NOTIFICATIONS_INDEX + "/_search?q=job_id:" + jobId); - String notificationsResponseAsString = responseEntityToString(notificationsResponse); + client().performRequest(new Request("POST", "/_refresh")); + Response notificationsResponse = client().performRequest( + new Request("GET", AuditorField.NOTIFICATIONS_INDEX + "/_search?q=job_id:" + jobId)); + String notificationsResponseAsString = EntityUtils.toString(notificationsResponse.getEntity()); assertThat(notificationsResponseAsString, containsString("\"message\":\"Datafeed is encountering errors extracting data: " + "action [indices:data/read/search] is unauthorized for user [ml_admin_plus_data]\"")); } public void testLookbackWithPipelineBucketAgg() throws Exception { String jobId = "pipeline-bucket-agg-job"; - String job = "{\"analysis_config\" :{\"bucket_span\":\"1h\"," - + "\"summary_count_field_name\":\"doc_count\"," - + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"percentile95_airlines_count\"}]}," - + "\"data_description\" : {\"time_field\":\"time stamp\"}" - + "}"; - client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), - new StringEntity(job, ContentType.APPLICATION_JSON)); + Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJobRequest.setJsonEntity("{\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"1h\",\n" + + " \"summary_count_field_name\": \"doc_count\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"percentile95_airlines_count\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\"time_field\": \"time stamp\"}\n" + + "}"); + client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":\"15m\"}," @@ -582,8 +653,9 @@ public void testLookbackWithPipelineBucketAgg() throws Exception { startDatafeedAndWaitUntilStopped(datafeedId); waitUntilJobIsClosed(jobId); - Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + Response jobStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":2")); assertThat(jobStatsResponseAsString, containsString("\"input_field_count\":4")); assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":2")); @@ -599,15 +671,15 @@ public void testRealtime() throws Exception { new DatafeedBuilder(datafeedId, jobId, "airline-data", "response").build(); openJob(client(), jobId); - Response response = client().performRequest("post", - MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start?start=2016-06-01T00:00:00Z"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(response), equalTo("{\"started\":true}")); + Request startRequest = new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start"); + startRequest.addParameter("start", 
"2016-06-01T00:00:00Z"); + Response response = client().performRequest(startRequest); + assertThat(EntityUtils.toString(response.getEntity()), equalTo("{\"started\":true}")); assertBusy(() -> { try { - Response getJobResponse = client().performRequest("get", - MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - String responseAsString = responseEntityToString(getJobResponse); + Response getJobResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String responseAsString = EntityUtils.toString(getJobResponse.getEntity()); assertThat(responseAsString, containsString("\"processed_record_count\":2")); assertThat(responseAsString, containsString("\"state\":\"opened\"")); } catch (Exception e1) { @@ -619,9 +691,9 @@ public void testRealtime() throws Exception { // test a model snapshot is present assertBusy(() -> { try { - Response getJobResponse = client().performRequest("get", - MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/model_snapshots"); - String responseAsString = responseEntityToString(getJobResponse); + Response getJobResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/model_snapshots")); + String responseAsString = EntityUtils.toString(getJobResponse.getEntity()); assertThat(responseAsString, containsString("\"count\":1")); } catch (Exception e1) { throw new RuntimeException(e1); @@ -629,25 +701,25 @@ public void testRealtime() throws Exception { }); ResponseException e = expectThrows(ResponseException.class, - () -> client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + () -> client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId))); response = e.getResponse(); assertThat(response.getStatusLine().getStatusCode(), equalTo(409)); - assertThat(responseEntityToString(response), containsString("Cannot delete job [" + jobId + "] because datafeed [" + datafeedId - + "] refers to it")); + assertThat(EntityUtils.toString(response.getEntity()), + containsString("Cannot delete job [" + jobId + "] because datafeed [" + datafeedId + "] refers to it")); - response = client().performRequest("post", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stop"); + response = client().performRequest(new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stop")); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(response), equalTo("{\"stopped\":true}")); + assertThat(EntityUtils.toString(response.getEntity()), equalTo("{\"stopped\":true}")); - client().performRequest("POST", "/_xpack/ml/anomaly_detectors/" + jobId + "/_close"); + client().performRequest(new Request("POST", "/_xpack/ml/anomaly_detectors/" + jobId + "/_close")); - response = client().performRequest("delete", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); + response = client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId)); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(response), equalTo("{\"acknowledged\":true}")); + assertThat(EntityUtils.toString(response.getEntity()), equalTo("{\"acknowledged\":true}")); - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + response = client().performRequest(new Request("DELETE", 
MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(response), equalTo("{\"acknowledged\":true}")); + assertThat(EntityUtils.toString(response.getEntity()), equalTo("{\"acknowledged\":true}")); } public void testForceDeleteWhileDatafeedIsRunning() throws Exception { @@ -657,25 +729,26 @@ public void testForceDeleteWhileDatafeedIsRunning() throws Exception { new DatafeedBuilder(datafeedId, jobId, "airline-data", "response").build(); openJob(client(), jobId); - Response response = client().performRequest("post", - MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start?start=2016-06-01T00:00:00Z"); + Request startRequest = new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start"); + startRequest.addParameter("start", "2016-06-01T00:00:00Z"); + Response response = client().performRequest(startRequest); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(response), equalTo("{\"started\":true}")); + assertThat(EntityUtils.toString(response.getEntity()), equalTo("{\"started\":true}")); ResponseException e = expectThrows(ResponseException.class, - () -> client().performRequest("delete", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId)); + () -> client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId))); response = e.getResponse(); assertThat(response.getStatusLine().getStatusCode(), equalTo(409)); - assertThat(responseEntityToString(response), containsString("Cannot delete datafeed [" + datafeedId - + "] while its status is started")); + assertThat(EntityUtils.toString(response.getEntity()), + containsString("Cannot delete datafeed [" + datafeedId + "] while its status is started")); - response = client().performRequest("delete", - MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "?force=true"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(response), equalTo("{\"acknowledged\":true}")); + Request forceDeleteRequest = new Request("DELETE", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); + forceDeleteRequest.addParameter("force", "true"); + response = client().performRequest(forceDeleteRequest); + assertThat(EntityUtils.toString(response.getEntity()), equalTo("{\"acknowledged\":true}")); expectThrows(ResponseException.class, - () -> client().performRequest("get", "/_xpack/ml/datafeeds/" + datafeedId)); + () -> client().performRequest(new Request("GET", "/_xpack/ml/datafeeds/" + datafeedId))); } private class LookbackOnlyTestHelper { @@ -727,9 +800,9 @@ public void execute() throws Exception { startDatafeedAndWaitUntilStopped(datafeedId); waitUntilJobIsClosed(jobId); - Response jobStatsResponse = client().performRequest("get", - MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + Response jobStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); if (shouldSucceedInput) { assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":2")); } else { @@ -748,16 +821,20 @@ private void startDatafeedAndWaitUntilStopped(String datafeedId) throws Exceptio } private void startDatafeedAndWaitUntilStopped(String 
datafeedId, String authHeader) throws Exception { - Response startDatafeedRequest = client().performRequest("post", - MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start?start=2016-06-01T00:00:00Z&end=2016-06-02T00:00:00Z", - new BasicHeader("Authorization", authHeader)); - assertThat(startDatafeedRequest.getStatusLine().getStatusCode(), equalTo(200)); - assertThat(responseEntityToString(startDatafeedRequest), equalTo("{\"started\":true}")); + Request request = new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start"); + request.addParameter("start", "2016-06-01T00:00:00Z"); + request.addParameter("end", "2016-06-02T00:00:00Z"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Authorization", authHeader); + request.setOptions(options); + Response startDatafeedResponse = client().performRequest(request); + assertThat(EntityUtils.toString(startDatafeedResponse.getEntity()), equalTo("{\"started\":true}")); assertBusy(() -> { try { - Response datafeedStatsResponse = client().performRequest("get", - MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stats"); - assertThat(responseEntityToString(datafeedStatsResponse), containsString("\"state\":\"stopped\"")); + Response datafeedStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stats")); + assertThat(EntityUtils.toString(datafeedStatsResponse.getEntity()), + containsString("\"state\":\"stopped\"")); } catch (Exception e) { throw new RuntimeException(e); } @@ -767,9 +844,9 @@ private void startDatafeedAndWaitUntilStopped(String datafeedId, String authHead private void waitUntilJobIsClosed(String jobId) throws Exception { assertBusy(() -> { try { - Response jobStatsResponse = client().performRequest("get", - MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertThat(responseEntityToString(jobStatsResponse), containsString("\"state\":\"closed\"")); + Response jobStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + assertThat(EntityUtils.toString(jobStatsResponse.getEntity()), containsString("\"state\":\"closed\"")); } catch (Exception e) { throw new RuntimeException(e); } @@ -777,27 +854,30 @@ private void waitUntilJobIsClosed(String jobId) throws Exception { } private Response createJob(String id, String airlineVariant) throws Exception { - String job = "{\n" + " \"description\":\"Analysis of response time by airline\",\n" - + " \"analysis_config\" : {\n" + " \"bucket_span\":\"1h\",\n" - + " \"detectors\" :[\n" - + " {\"function\":\"mean\",\"field_name\":\"responsetime\",\"by_field_name\":\"" + airlineVariant + "\"}]\n" - + " },\n" + " \"data_description\" : {\n" - + " \"format\":\"xcontent\",\n" - + " \"time_field\":\"time stamp\",\n" + " \"time_format\":\"yyyy-MM-dd'T'HH:mm:ssX\"\n" + " }\n" - + "}"; - return client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + id, - Collections.emptyMap(), new StringEntity(job, ContentType.APPLICATION_JSON)); - } - - private static String responseEntityToString(Response response) throws Exception { - try (BufferedReader reader = new BufferedReader(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) { - return reader.lines().collect(Collectors.joining("\n")); - } + Request request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + id); + request.setJsonEntity("{\n" + + " 
\"description\": \"Analysis of response time by airline\",\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"1h\",\n" + + " \"detectors\" :[\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"responsetime\",\n" + + " \"by_field_name\": \"" + airlineVariant + "\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\n" + + " \"format\": \"xcontent\",\n" + + " \"time_field\": \"time stamp\",\n" + + " \"time_format\": \"yyyy-MM-dd'T'HH:mm:ssX\"\n" + + " }\n" + + "}"); + return client().performRequest(request); } public static void openJob(RestClient client, String jobId) throws IOException { - Response response = client.performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + client.performRequest(new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open")); } @After @@ -850,17 +930,28 @@ DatafeedBuilder setChunkingTimespan(String timespan) { } Response build() throws IOException { - String datafeedConfig = "{" + Request request = new Request("PUT", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); + request.setJsonEntity("{" + "\"job_id\": \"" + jobId + "\",\"indexes\":[\"" + index + "\"],\"types\":[\"" + type + "\"]" + (source ? ",\"_source\":true" : "") + (scriptedFields == null ? "" : ",\"script_fields\":" + scriptedFields) + (aggregations == null ? "" : ",\"aggs\":" + aggregations) + (chunkingTimespan == null ? "" : ",\"chunking_config\":{\"mode\":\"MANUAL\",\"time_span\":\"" + chunkingTimespan + "\"}") - + "}"; - return client().performRequest("put", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId, Collections.emptyMap(), - new StringEntity(datafeedConfig, ContentType.APPLICATION_JSON), - new BasicHeader("Authorization", authHeader)); + + "}"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Authorization", authHeader); + request.setOptions(options); + return client().performRequest(request); } } + + private void bulkIndex(String bulk) throws IOException { + Request bulkRequest = new Request("POST", "/_bulk"); + bulkRequest.setJsonEntity(bulk); + bulkRequest.addParameter("refresh", "true"); + bulkRequest.addParameter("pretty", null); + String bulkResponse = EntityUtils.toString(client().performRequest(bulkRequest).getEntity()); + assertThat(bulkResponse, not(containsString("\"errors\": false"))); + } } diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java diff --git 
a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java new file mode 100644 index 0000000000000..5fc204cbf1f82 --- /dev/null +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java @@ -0,0 +1,607 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.ml.integration.MlRestTestStateCleaner; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; +import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; +import org.junit.After; + +import java.io.IOException; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class MlJobIT extends ESRestTestCase { + + private static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("x_pack_rest_user", + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); + + @Override + protected Settings restClientSettings() { + return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build(); + } + + @Override + protected boolean preserveTemplatesUponCompletion() { + return true; + } + + public void testPutJob_GivenFarequoteConfig() throws Exception { + Response response = createFarequoteJob("given-farequote-config-job"); + String responseAsString = EntityUtils.toString(response.getEntity()); + assertThat(responseAsString, containsString("\"job_id\":\"given-farequote-config-job\"")); + } + + public void testGetJob_GivenNoSuchJob() throws Exception { + ResponseException e = expectThrows(ResponseException.class, () -> + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/non-existing-job/_stats"))); + + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + assertThat(e.getMessage(), containsString("No known job with id 'non-existing-job'")); + } + + public void testGetJob_GivenJobExists() throws Exception { + createFarequoteJob("get-job_given-job-exists-job"); + + Response response = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/get-job_given-job-exists-job/_stats")); + String responseAsString = EntityUtils.toString(response.getEntity()); + assertThat(responseAsString, containsString("\"count\":1")); + assertThat(responseAsString, containsString("\"job_id\":\"get-job_given-job-exists-job\"")); + } + + public void testGetJobs_GivenSingleJob() throws Exception { + String jobId = "get-jobs_given-single-job-job"; + createFarequoteJob(jobId); + + // Explicit _all + String explictAll = EntityUtils.toString( + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/_all")).getEntity()); + assertThat(explictAll, containsString("\"count\":1")); + assertThat(explictAll, 
containsString("\"job_id\":\"" + jobId + "\"")); + + // Implicit _all + String implicitAll = EntityUtils.toString( + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors")).getEntity()); + assertThat(implicitAll, containsString("\"count\":1")); + assertThat(implicitAll, containsString("\"job_id\":\"" + jobId + "\"")); + } + + public void testGetJobs_GivenMultipleJobs() throws Exception { + createFarequoteJob("given-multiple-jobs-job-1"); + createFarequoteJob("given-multiple-jobs-job-2"); + createFarequoteJob("given-multiple-jobs-job-3"); + + // Explicit _all + String explicitAll = EntityUtils.toString( + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/_all")).getEntity()); + assertThat(explicitAll, containsString("\"count\":3")); + assertThat(explicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-1\"")); + assertThat(explicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-2\"")); + assertThat(explicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-3\"")); + + // Implicit _all + String implicitAll = EntityUtils.toString( + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors")).getEntity()); + assertThat(implicitAll, containsString("\"count\":3")); + assertThat(implicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-1\"")); + assertThat(implicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-2\"")); + assertThat(implicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-3\"")); + } + + private Response createFarequoteJob(String jobId) throws IOException { + Request request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + request.setJsonEntity( + "{\n" + + " \"description\":\"Analysis of response time by airline\",\n" + + " \"analysis_config\" : {\n" + + " \"bucket_span\": \"3600s\",\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]\n" + + " },\n" + " \"data_description\" : {\n" + + " \"field_delimiter\":\",\",\n" + + " \"time_field\":\"time\",\n" + + " \"time_format\":\"yyyy-MM-dd HH:mm:ssX\"\n" + + " }\n" + + "}"); + return client().performRequest(request); + } + + public void testCantCreateJobWithSameID() throws Exception { + String jobTemplate = "{\n" + + " \"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\"}]\n" + + " },\n" + + " \"data_description\": {},\n" + + " \"results_index_name\" : \"%s\"}"; + + String jobId = "cant-create-job-with-same-id-job"; + Request createJob1 = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJob1.setJsonEntity(String.format(Locale.ROOT, jobTemplate, "index-1")); + client().performRequest(createJob1); + + Request createJob2 = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJob2.setJsonEntity(String.format(Locale.ROOT, jobTemplate, "index-2")); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(createJob2)); + + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(e.getMessage(), containsString("The job cannot be created with the Id '" + jobId + "'. 
The Id is already used.")); + } + + public void testCreateJobsWithIndexNameOption() throws Exception { + String jobTemplate = "{\n" + + " \"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\"}]\n" + + " },\n" + + " \"data_description\": {},\n" + + " \"results_index_name\" : \"%s\"}"; + + String jobId1 = "create-jobs-with-index-name-option-job-1"; + String indexName = "non-default-index"; + Request createJob1 = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1); + createJob1.setJsonEntity(String.format(Locale.ROOT, jobTemplate, indexName)); + client().performRequest(createJob1); + + String jobId2 = "create-jobs-with-index-name-option-job-2"; + Request createJob2 = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2); + createJob2.setEntity(createJob1.getEntity()); + client().performRequest(createJob2); + + // With security enabled GET _aliases throws an index_not_found_exception + // if no aliases have been created. In multi-node tests the alias may not + // appear immediately so wait here. + assertBusy(() -> { + try { + String aliasesResponse = EntityUtils.toString(client().performRequest(new Request("GET", "/_aliases")).getEntity()); + assertThat(aliasesResponse, + containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName("custom-" + indexName) + "\":{\"aliases\":{")); + assertThat(aliasesResponse, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId1 + "\",\"boost\":1.0}}}}")); + assertThat(aliasesResponse, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId1) + "\":{}")); + assertThat(aliasesResponse, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId2) + + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId2 + "\",\"boost\":1.0}}}}")); + assertThat(aliasesResponse, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId2) + "\":{}")); + } catch (ResponseException e) { + throw new AssertionError(e); + } + }); + + String responseAsString = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(responseAsString, + containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName)); + assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1)))); + assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2)))); + + String id = String.format(Locale.ROOT, "%s_bucket_%s_%s", jobId1, "1234", 300); + Request createResultRequest = new Request("PUT", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/doc/" + id); + createResultRequest.setJsonEntity(String.format(Locale.ROOT, + "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"result_type\":\"bucket\", \"bucket_span\": \"%s\"}", + jobId1, "1234", 1)); + client().performRequest(createResultRequest); + + id = String.format(Locale.ROOT, "%s_bucket_%s_%s", jobId1, "1236", 300); + createResultRequest = new Request("PUT", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/doc/" + id); + createResultRequest.setJsonEntity(String.format(Locale.ROOT, + "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"result_type\":\"bucket\", \"bucket_span\": \"%s\"}", + jobId1, "1236", 1)); + client().performRequest(createResultRequest); + + client().performRequest(new Request("POST", "/_refresh")); + + responseAsString = 
EntityUtils.toString(client().performRequest( + new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1 + "/results/buckets")).getEntity()); + assertThat(responseAsString, containsString("\"count\":2")); + + responseAsString = EntityUtils.toString(client().performRequest( + new Request("GET", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/_search")).getEntity()); + assertThat(responseAsString, containsString("\"total\":2")); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1)); + + // check that indices still exist, but are empty and aliases are gone + responseAsString = EntityUtils.toString(client().performRequest(new Request("GET", "/_aliases")).getEntity()); + assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1)))); + assertThat(responseAsString, containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2))); //job2 still exists + + responseAsString = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(responseAsString, containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName)); + + client().performRequest(new Request("POST", "/_refresh")); + + responseAsString = EntityUtils.toString(client().performRequest( + new Request("GET", AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName + "/_count")).getEntity()); + assertThat(responseAsString, containsString("\"count\":0")); + } + + public void testCreateJobInSharedIndexUpdatesMapping() throws Exception { + String jobTemplate = "{\n" + + " \"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + + " },\n" + + " \"data_description\": {}\n" + + "}"; + + String jobId1 = "create-job-in-shared-index-updates-mapping-job-1"; + String byFieldName1 = "responsetime"; + String jobId2 = "create-job-in-shared-index-updates-mapping-job-2"; + String byFieldName2 = "cpu-usage"; + + Request createJob1Request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1); + createJob1Request.setJsonEntity(String.format(Locale.ROOT, jobTemplate, byFieldName1)); + client().performRequest(createJob1Request); + + // Check the index mapping contains the first by_field_name + Request getResultsMappingRequest = new Request("GET", + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT + "/_mapping"); + getResultsMappingRequest.addParameter("pretty", null); + String resultsMappingAfterJob1 = EntityUtils.toString(client().performRequest(getResultsMappingRequest).getEntity()); + assertThat(resultsMappingAfterJob1, containsString(byFieldName1)); + assertThat(resultsMappingAfterJob1, not(containsString(byFieldName2))); + + Request createJob2Request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2); + createJob2Request.setJsonEntity(String.format(Locale.ROOT, jobTemplate, byFieldName2)); + client().performRequest(createJob2Request); + + // Check the index mapping now contains both fields + String resultsMappingAfterJob2 = EntityUtils.toString(client().performRequest(getResultsMappingRequest).getEntity()); + assertThat(resultsMappingAfterJob2, containsString(byFieldName1)); + assertThat(resultsMappingAfterJob2, containsString(byFieldName2)); + } + + public void testCreateJobInCustomSharedIndexUpdatesMapping() throws Exception { + String jobTemplate = "{\n" + + " 
\"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + + " },\n" + + " \"data_description\": {},\n" + + " \"results_index_name\" : \"shared-index\"}"; + + String jobId1 = "create-job-in-custom-shared-index-updates-mapping-job-1"; + String byFieldName1 = "responsetime"; + String jobId2 = "create-job-in-custom-shared-index-updates-mapping-job-2"; + String byFieldName2 = "cpu-usage"; + + Request createJob1Request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1); + createJob1Request.setJsonEntity(String.format(Locale.ROOT, jobTemplate, byFieldName1)); + client().performRequest(createJob1Request); + + // Check the index mapping contains the first by_field_name + Request getResultsMappingRequest = new Request("GET", + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-shared-index/_mapping"); + getResultsMappingRequest.addParameter("pretty", null); + String resultsMappingAfterJob1 = EntityUtils.toString(client().performRequest(getResultsMappingRequest).getEntity()); + assertThat(resultsMappingAfterJob1, containsString(byFieldName1)); + assertThat(resultsMappingAfterJob1, not(containsString(byFieldName2))); + + Request createJob2Request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2); + createJob2Request.setJsonEntity(String.format(Locale.ROOT, jobTemplate, byFieldName2)); + client().performRequest(createJob2Request); + + // Check the index mapping now contains both fields + String resultsMappingAfterJob2 = EntityUtils.toString(client().performRequest(getResultsMappingRequest).getEntity()); + assertThat(resultsMappingAfterJob2, containsString(byFieldName1)); + assertThat(resultsMappingAfterJob2, containsString(byFieldName2)); + } + + public void testCreateJob_WithClashingFieldMappingsFails() throws Exception { + String jobTemplate = "{\n" + + " \"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + + " },\n" + + " \"data_description\": {}\n" + + "}"; + + String jobId1 = "job-with-response-field"; + String byFieldName1; + String jobId2 = "job-will-fail-with-mapping-error-on-response-field"; + String byFieldName2; + // we should get the friendly advice nomatter which way around the clashing fields are seen + if (randomBoolean()) { + byFieldName1 = "response"; + byFieldName2 = "response.time"; + } else { + byFieldName1 = "response.time"; + byFieldName2 = "response"; + } + + Request createJob1Request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1); + createJob1Request.setJsonEntity(String.format(Locale.ROOT, jobTemplate, byFieldName1)); + client().performRequest(createJob1Request); + + Request createJob2Request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2); + createJob2Request.setJsonEntity(String.format(Locale.ROOT, jobTemplate, byFieldName2)); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(createJob2Request)); + assertThat(e.getMessage(), + containsString("This job would cause a mapping clash with existing field [response] - " + + "avoid the clash by assigning a dedicated results index")); + } + + public void testDeleteJob() throws Exception { + String jobId = "delete-job-job"; + String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; + createFarequoteJob(jobId); + + String indicesBeforeDelete = 
EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(indicesBeforeDelete, containsString(indexName)); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + + // check that the index still exists (it's shared by default) + String indicesAfterDelete = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(indicesAfterDelete, containsString(indexName)); + + assertBusy(() -> { + try { + String count = EntityUtils.toString(client().performRequest(new Request("GET", indexName + "/_count")).getEntity()); + assertThat(count, containsString("\"count\":0")); + } catch (Exception e) { + fail(e.getMessage()); + } + }); + + // check that the job itself is gone + expectThrows(ResponseException.class, () -> + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"))); + } + + public void testDeleteJobAfterMissingIndex() throws Exception { + String jobId = "delete-job-after-missing-index-job"; + String aliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); + String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; + createFarequoteJob(jobId); + + String indicesBeforeDelete = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(indicesBeforeDelete, containsString(indexName)); + + // Manually delete the index so that we can test that deletion proceeds + // normally anyway + client().performRequest(new Request("DELETE", indexName)); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + + // check index was deleted + String indicesAfterDelete = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(indicesAfterDelete, not(containsString(aliasName))); + assertThat(indicesAfterDelete, not(containsString(indexName))); + + expectThrows(ResponseException.class, () -> + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"))); + } + + public void testDeleteJobAfterMissingAliases() throws Exception { + String jobId = "delete-job-after-missing-alias-job"; + String readAliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); + String writeAliasName = AnomalyDetectorsIndex.resultsWriteAlias(jobId); + String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; + createFarequoteJob(jobId); + + // With security enabled cat aliases throws an index_not_found_exception + // if no aliases have been created. In multi-node tests the alias may not + // appear immediately so wait here. 
+ assertBusy(() -> { + try { + String aliases = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/aliases")).getEntity()); + assertThat(aliases, containsString(readAliasName)); + assertThat(aliases, containsString(writeAliasName)); + } catch (ResponseException e) { + throw new AssertionError(e); + } + }); + + // Manually delete the aliases so that we can test that deletion proceeds + // normally anyway + client().performRequest(new Request("DELETE", indexName + "/_alias/" + readAliasName)); + client().performRequest(new Request("DELETE", indexName + "/_alias/" + writeAliasName)); + + // check aliases were deleted + expectThrows(ResponseException.class, () -> client().performRequest(new Request("GET", indexName + "/_alias/" + readAliasName))); + expectThrows(ResponseException.class, () -> client().performRequest(new Request("GET", indexName + "/_alias/" + writeAliasName))); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + } + + public void testMultiIndexDelete() throws Exception { + String jobId = "multi-index-delete-job"; + String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; + createFarequoteJob(jobId); + + client().performRequest(new Request("PUT", indexName + "-001")); + client().performRequest(new Request("PUT", indexName + "-002")); + + String indicesBeforeDelete = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + assertThat(indicesBeforeDelete, containsString(indexName)); + assertThat(indicesBeforeDelete, containsString(indexName + "-001")); + assertThat(indicesBeforeDelete, containsString(indexName + "-002")); + + // Add some documents to each index to make sure the DBQ clears them out + Request createDoc0 = new Request("PUT", indexName + "/doc/" + 123); + createDoc0.setJsonEntity(String.format(Locale.ROOT, + "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"bucket_span\":%d, \"result_type\":\"record\"}", + jobId, 123, 1)); + client().performRequest(createDoc0); + Request createDoc1 = new Request("PUT", indexName + "-001/doc/" + 123); + createDoc1.setEntity(createDoc0.getEntity()); + client().performRequest(createDoc1); + Request createDoc2 = new Request("PUT", indexName + "-002/doc/" + 123); + createDoc2.setEntity(createDoc0.getEntity()); + client().performRequest(createDoc2); + + // Also index a few through the alias for the first job + Request createDoc3 = new Request("PUT", indexName + "/doc/" + 456); + createDoc3.setEntity(createDoc0.getEntity()); + client().performRequest(createDoc3); + + client().performRequest(new Request("POST", "/_refresh")); + + // check for the documents + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", indexName+ "/_count")).getEntity()), + containsString("\"count\":2")); + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", indexName+ "-001/_count")).getEntity()), + containsString("\"count\":1")); + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", indexName+ "-002/_count")).getEntity()), + containsString("\"count\":1")); + + // Delete + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + + client().performRequest(new Request("POST", "/_refresh")); + + // check that the indices still exist but are empty + String indicesAfterDelete = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); + 
assertThat(indicesAfterDelete, containsString(indexName)); + assertThat(indicesAfterDelete, containsString(indexName + "-001")); + assertThat(indicesAfterDelete, containsString(indexName + "-002")); + + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", indexName+ "/_count")).getEntity()), + containsString("\"count\":0")); + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", indexName+ "-001/_count")).getEntity()), + containsString("\"count\":0")); + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", indexName+ "-002/_count")).getEntity()), + containsString("\"count\":0")); + + + expectThrows(ResponseException.class, () -> + client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"))); + } + + public void testDelete_multipleRequest() throws Exception { + String jobId = "delete-job-mulitple-times"; + createFarequoteJob(jobId); + + ConcurrentMapLong responses = ConcurrentCollections.newConcurrentMapLong(); + ConcurrentMapLong responseExceptions = ConcurrentCollections.newConcurrentMapLong(); + AtomicReference ioe = new AtomicReference<>(); + AtomicInteger recreationGuard = new AtomicInteger(0); + AtomicReference recreationResponse = new AtomicReference<>(); + AtomicReference recreationException = new AtomicReference<>(); + + Runnable deleteJob = () -> { + try { + boolean forceDelete = randomBoolean(); + String url = MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId; + if (forceDelete) { + url += "?force=true"; + } + Response response = client().performRequest(new Request("DELETE", url)); + responses.put(Thread.currentThread().getId(), response); + } catch (ResponseException re) { + responseExceptions.put(Thread.currentThread().getId(), re); + } catch (IOException e) { + ioe.set(e); + } + + // Immediately after the first deletion finishes, recreate the job. This should pick up + // race conditions where another delete request deletes part of the newly created job. + if (recreationGuard.getAndIncrement() == 0) { + try { + recreationResponse.set(createFarequoteJob(jobId)); + } catch (ResponseException re) { + recreationException.set(re); + } catch (IOException e) { + ioe.set(e); + } + } + }; + + // The idea is to hit the situation where one request waits for + // the other to complete. 
This is difficult to schedule but + // hopefully it will happen in CI + int numThreads = 5; + Thread [] threads = new Thread[numThreads]; + for (int i=0; i events = new ArrayList<>(); long eventStartTime = startTime + (bucketCount + 1) * bucketSpan.millis(); @@ -257,6 +259,81 @@ public void testOnlineUpdate() throws Exception { assertEquals(0, buckets.get(8).getScheduledEvents().size()); } + /** + * An open job that later gets added to a calendar, should take the scheduled events into account + */ + public void testAddOpenedJobToGroupWithCalendar() throws Exception { + TimeValue bucketSpan = TimeValue.timeValueMinutes(30); + String groupName = "opened-calendar-job-group"; + Job.Builder job = createJob("scheduled-events-add-opened-job-to-group-with-calendar", bucketSpan); + + long startTime = 1514764800000L; + final int bucketCount = 5; + + // Open the job + openJob(job.getId()); + + // write some buckets of data + postData(job.getId(), generateData(startTime, bucketSpan, bucketCount, bucketIndex -> randomIntBetween(100, 200)) + .stream().collect(Collectors.joining())); + + String calendarId = "test-calendar-open-job-update"; + + // Create a new calendar referencing groupName + putCalendar(calendarId, Collections.singletonList(groupName), "testAddOpenedJobToGroupWithCalendar calendar"); + + // Put events in the calendar + List events = new ArrayList<>(); + long eventStartTime = startTime + (bucketCount + 1) * bucketSpan.millis(); + long eventEndTime = eventStartTime + (long)(1.5 * bucketSpan.millis()); + events.add(new ScheduledEvent.Builder().description("Some Event") + .startTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(eventStartTime), ZoneOffset.UTC)) + .endTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(eventEndTime), ZoneOffset.UTC)) + .calendarId(calendarId).build()); + + postScheduledEvents(calendarId, events); + + // Update the job to be a member of the group + UpdateJobAction.Request jobUpdateRequest = new UpdateJobAction.Request(job.getId(), + new JobUpdate.Builder(job.getId()).setGroups(Collections.singletonList(groupName)).build()); + client().execute(UpdateJobAction.INSTANCE, jobUpdateRequest).actionGet(); + + // Wait until the notification that the job was updated is indexed + assertBusy(() -> { + SearchResponse searchResponse = client().prepareSearch(".ml-notifications") + .setSize(1) + .addSort("timestamp", SortOrder.DESC) + .setQuery(QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("job_id", job.getId())) + .filter(QueryBuilders.termQuery("level", "info")) + ).get(); + SearchHit[] hits = searchResponse.getHits().getHits(); + assertThat(hits.length, equalTo(1)); + assertThat(hits[0].getSourceAsMap().get("message"), equalTo("Job updated: [groups]")); + }); + + // write some more buckets of data that cover the scheduled event period + postData(job.getId(), generateData(startTime + bucketCount * bucketSpan.millis(), bucketSpan, 5, + bucketIndex -> randomIntBetween(100, 200)) + .stream().collect(Collectors.joining())); + // and close + closeJob(job.getId()); + + GetBucketsAction.Request getBucketsRequest = new GetBucketsAction.Request(job.getId()); + List buckets = getBuckets(getBucketsRequest); + + // the first 6 buckets have no events + for (int i=0; i<=bucketCount; i++) { + assertEquals(0, buckets.get(i).getScheduledEvents().size()); + } + // 7th and 8th buckets have the event but the last one does not + assertEquals(1, buckets.get(6).getScheduledEvents().size()); + assertEquals("Some Event", buckets.get(6).getScheduledEvents().get(0)); + assertEquals(1, 
buckets.get(7).getScheduledEvents().size()); + assertEquals("Some Event", buckets.get(7).getScheduledEvents().get(0)); + assertEquals(0, buckets.get(8).getScheduledEvents().size()); + } + private Job.Builder createJob(String jobId, TimeValue bucketSpan) { Detector.Builder detector = new Detector.Builder("count", null); AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java diff --git a/x-pack/qa/ml-no-bootstrap-tests/build.gradle b/x-pack/plugin/ml/qa/no-bootstrap-tests/build.gradle similarity index 64% rename from x-pack/qa/ml-no-bootstrap-tests/build.gradle rename to x-pack/plugin/ml/qa/no-bootstrap-tests/build.gradle index 7e252afa3022e..9eac3fdd37a80 100644 --- a/x-pack/qa/ml-no-bootstrap-tests/build.gradle +++ b/x-pack/plugin/ml/qa/no-bootstrap-tests/build.gradle @@ -1,6 +1,6 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('ml'), configuration: 'runtime') } diff --git a/x-pack/qa/ml-no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java b/x-pack/plugin/ml/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java similarity index 100% rename from x-pack/qa/ml-no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java rename to x-pack/plugin/ml/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java diff --git a/x-pack/qa/ml-single-node-tests/build.gradle b/x-pack/plugin/ml/qa/single-node-tests/build.gradle similarity index 80% rename from x-pack/qa/ml-single-node-tests/build.gradle rename to x-pack/plugin/ml/qa/single-node-tests/build.gradle index b62e37894b3c3..88ca4dd118ea4 100644 --- a/x-pack/qa/ml-single-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/single-node-tests/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('ml'), configuration: 'runtime') } diff --git a/x-pack/qa/ml-single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java b/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java similarity index 84% rename from x-pack/qa/ml-single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java rename to x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java index 79e9a81831fc8..ffd869a4a6e2f 100644 --- 
a/x-pack/qa/ml-single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java +++ b/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java @@ -5,9 +5,8 @@ */ package org.elasticsearch.xpack.ml.transforms; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; @@ -18,7 +17,6 @@ import org.joda.time.DateTime; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -185,9 +183,10 @@ public void testIsolated() throws Exception { .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0); createIndex("painless", settings.build()); - client().performRequest("PUT", "painless/test/1", Collections.emptyMap(), - new StringEntity("{\"test\": \"test\"}", ContentType.APPLICATION_JSON)); - client().performRequest("POST", "painless/_refresh"); + Request createDoc = new Request("PUT", "/painless/test/1"); + createDoc.setJsonEntity("{\"test\": \"test\"}"); + createDoc.addParameter("refresh", "true"); + client().performRequest(createDoc); Pattern pattern = Pattern.compile("domain_split\":\\[(.*?),(.*?)\\]"); @@ -198,7 +197,9 @@ public void testIsolated() throws Exception { String mapAsJson = Strings.toString(jsonBuilder().map(params)); logger.info("params={}", mapAsJson); - StringEntity body = new StringEntity("{\n" + + Request searchRequest = new Request("GET", "/painless/test/_search"); + searchRequest.setJsonEntity( + "{\n" + " \"query\" : {\n" + " \"match_all\": {}\n" + " },\n" + @@ -212,10 +213,8 @@ public void testIsolated() throws Exception { " }\n" + " }\n" + " }\n" + - "}", ContentType.APPLICATION_JSON); - - Response response = client().performRequest("GET", "painless/test/_search", Collections.emptyMap(), body); - String responseBody = EntityUtils.toString(response.getEntity()); + "}"); + String responseBody = EntityUtils.toString(client().performRequest(searchRequest).getEntity()); Matcher m = pattern.matcher(responseBody); String actualSubDomain = ""; @@ -240,25 +239,25 @@ public void testIsolated() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32966") public void testHRDSplit() throws Exception { - // Create job - String job = "{\n" + - " \"description\":\"Domain splitting\",\n" + - " \"analysis_config\" : {\n" + - " \"bucket_span\":\"3600s\",\n" + - " \"detectors\" :[{\"function\":\"count\", \"by_field_name\" : \"domain_split\"}]\n" + - " },\n" + - " \"data_description\" : {\n" + - " \"field_delimiter\":\",\",\n" + - " \"time_field\":\"time\"\n" + - " \n" + - " }\n" + - " }"; - - client().performRequest("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/hrd-split-job", Collections.emptyMap(), - new StringEntity(job, ContentType.APPLICATION_JSON)); - client().performRequest("POST", MachineLearning.BASE_PATH + "anomaly_detectors/hrd-split-job/_open"); + Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/hrd-split-job"); + createJobRequest.setJsonEntity( + "{\n" + + " \"description\":\"Domain splitting\",\n" + + " \"analysis_config\" : {\n" + + " \"bucket_span\":\"3600s\",\n" + + " \"detectors\" :[{\"function\":\"count\", \"by_field_name\" : \"domain_split\"}]\n" 
+ + " },\n" + + " \"data_description\" : {\n" + + " \"field_delimiter\":\",\",\n" + + " \"time_field\":\"time\"\n" + + " \n" + + " }\n" + + "}"); + client().performRequest(createJobRequest); + client().performRequest(new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/hrd-split-job/_open")); // Create index to hold data Settings.Builder settings = Settings.builder() @@ -283,44 +282,43 @@ public void testHRDSplit() throws Exception { if (i == 64) { // Anomaly has 100 docs, but we don't care about the value for (int j = 0; j < 100; j++) { - client().performRequest("PUT", "painless/test/" + time.toDateTimeISO() + "_" + j, - Collections.emptyMap(), - new StringEntity("{\"domain\": \"" + "bar.bar.com\", \"time\": \"" + time.toDateTimeISO() - + "\"}", ContentType.APPLICATION_JSON)); + Request createDocRequest = new Request("PUT", "/painless/test/" + time.toDateTimeISO() + "_" + j); + createDocRequest.setJsonEntity("{\"domain\": \"" + "bar.bar.com\", \"time\": \"" + time.toDateTimeISO() + "\"}"); + client().performRequest(createDocRequest); } } else { // Non-anomalous values will be what's seen when the anomaly is reported - client().performRequest("PUT", "painless/test/" + time.toDateTimeISO(), - Collections.emptyMap(), - new StringEntity("{\"domain\": \"" + test.hostName + "\", \"time\": \"" + time.toDateTimeISO() - + "\"}", ContentType.APPLICATION_JSON)); + Request createDocRequest = new Request("PUT", "/painless/test/" + time.toDateTimeISO()); + createDocRequest.setJsonEntity("{\"domain\": \"" + test.hostName + "\", \"time\": \"" + time.toDateTimeISO() + "\"}"); + client().performRequest(createDocRequest); } } - client().performRequest("POST", "painless/_refresh"); + client().performRequest(new Request("POST", "/painless/_refresh")); // Create and start datafeed - String body = "{\n" + - " \"job_id\":\"hrd-split-job\",\n" + - " \"indexes\":[\"painless\"],\n" + - " \"types\":[\"test\"],\n" + - " \"script_fields\": {\n" + - " \"domain_split\": {\n" + - " \"script\": \"return domainSplit(doc['domain'].value, params);\"\n" + - " }\n" + - " }\n" + - " }"; - - client().performRequest("PUT", MachineLearning.BASE_PATH + "datafeeds/hrd-split-datafeed", Collections.emptyMap(), - new StringEntity(body, ContentType.APPLICATION_JSON)); - client().performRequest("POST", MachineLearning.BASE_PATH + "datafeeds/hrd-split-datafeed/_start"); + Request createFeedRequest = new Request("PUT", MachineLearning.BASE_PATH + "datafeeds/hrd-split-datafeed"); + createFeedRequest.setJsonEntity( + "{\n" + + " \"job_id\":\"hrd-split-job\",\n" + + " \"indexes\":[\"painless\"],\n" + + " \"types\":[\"test\"],\n" + + " \"script_fields\": {\n" + + " \"domain_split\": {\n" + + " \"script\": \"return domainSplit(doc['domain'].value, params);\"\n" + + " }\n" + + " }\n" + + "}"); + + client().performRequest(createFeedRequest); + client().performRequest(new Request("POST", MachineLearning.BASE_PATH + "datafeeds/hrd-split-datafeed/_start")); boolean passed = awaitBusy(() -> { try { - client().performRequest("POST", "/_refresh"); + client().performRequest(new Request("POST", "/_refresh")); - Response response = client().performRequest("GET", - MachineLearning.BASE_PATH + "anomaly_detectors/hrd-split-job/results/records"); + Response response = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/hrd-split-job/results/records")); String responseBody = EntityUtils.toString(response.getEntity()); if (responseBody.contains("\"count\":2")) { diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java index 3ca3c3154506a..252cf97d0c519 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.ml.action; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; @@ -53,11 +51,6 @@ protected void doExecute(Task task, IsolateDatafeedAction.Request request, Actio String executorNode = datafeedTask.getExecutorNode(); DiscoveryNodes nodes = state.nodes(); - if (nodes.resolveNode(executorNode).getVersion().before(Version.V_5_5_0)) { - listener.onFailure(new ElasticsearchException("Force delete datafeed is not supported because the datafeed task " + - "is running on a node [" + executorNode + "] with a version prior to " + Version.V_5_5_0)); - return; - } request.setNodes(datafeedTask.getExecutorNode()); super.doExecute(task, request, listener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java index b40f0368a1554..a9b43c3bcc47d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.ml.action; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -73,12 +71,6 @@ protected void doExecute(Task task, KillProcessAction.Request request, ActionLis return; } - Version nodeVersion = executorNode.getVersion(); - if (nodeVersion.before(Version.V_5_5_0)) { - listener.onFailure(new ElasticsearchException("Cannot kill the process on node with version " + nodeVersion)); - return; - } - super.doExecute(task, request, listener); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 56d03dd1aacc6..512d8188abfac 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -179,14 +179,6 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j continue; } - if (nodeSupportsJobVersion(node.getVersion()) == false) { - String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndVersion(node) - + "], because this node does not support jobs of version [" + job.getJobVersion() + "]"; - logger.trace(reason); - reasons.add(reason); - continue; - } - if (nodeSupportsModelSnapshotVersion(node, job) == false) { String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndVersion(node) + "], because the job's model snapshot requires a node 
of version [" @@ -385,10 +377,6 @@ static List verifyIndicesPrimaryShardsAreActive(String jobId, ClusterSta return unavailableIndices; } - private static boolean nodeSupportsJobVersion(Version nodeVersion) { - return nodeVersion.onOrAfter(Version.V_5_5_0); - } - private static boolean nodeSupportsModelSnapshotVersion(DiscoveryNode node, Job job) { if (job.getModelSnapshotId() == null || job.getModelSnapshotMinVersion() == null) { // There is no snapshot to restore or the min model snapshot version is 5.5.0 diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index 0ea9eb7764803..f3f4a771443ef 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; @@ -46,10 +47,10 @@ import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; import org.elasticsearch.xpack.ml.datafeed.DatafeedNodeSelector; -import org.elasticsearch.xpack.ml.datafeed.MlRemoteLicenseChecker; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.function.Predicate; @@ -141,19 +142,22 @@ public void onFailure(Exception e) { DatafeedConfig datafeed = mlMetadata.getDatafeed(params.getDatafeedId()); Job job = mlMetadata.getJobs().get(datafeed.getJobId()); - if (MlRemoteLicenseChecker.containsRemoteIndex(datafeed.getIndices())) { - MlRemoteLicenseChecker remoteLicenseChecker = new MlRemoteLicenseChecker(client); - remoteLicenseChecker.checkRemoteClusterLicenses(MlRemoteLicenseChecker.remoteClusterNames(datafeed.getIndices()), + if (RemoteClusterLicenseChecker.containsRemoteIndex(datafeed.getIndices())) { + final RemoteClusterLicenseChecker remoteClusterLicenseChecker = + new RemoteClusterLicenseChecker(client, XPackLicenseState::isMachineLearningAllowedForOperationMode); + remoteClusterLicenseChecker.checkRemoteClusterLicenses( + RemoteClusterLicenseChecker.remoteClusterAliases(datafeed.getIndices()), ActionListener.wrap( response -> { - if (response.isViolated()) { + if (response.isSuccess() == false) { listener.onFailure(createUnlicensedError(datafeed.getId(), response)); } else { createDataExtractor(job, datafeed, params, waitForTaskListener); } }, - e -> listener.onFailure(createUnknownLicenseError(datafeed.getId(), - MlRemoteLicenseChecker.remoteIndices(datafeed.getIndices()), e)) + e -> listener.onFailure( + createUnknownLicenseError( + datafeed.getId(), RemoteClusterLicenseChecker.remoteIndices(datafeed.getIndices()), e)) )); } else { createDataExtractor(job, datafeed, params, waitForTaskListener); @@ -232,23 +236,35 @@ public void onFailure(Exception e) { ); } - private ElasticsearchStatusException createUnlicensedError(String datafeedId, - MlRemoteLicenseChecker.LicenseViolation licenseViolation) { - String message = "Cannot start datafeed [" + datafeedId + "] 
as it is configured to use " - + "indices on a remote cluster [" + licenseViolation.get().getClusterName() - + "] that is not licensed for Machine Learning. " - + MlRemoteLicenseChecker.buildErrorMessage(licenseViolation.get()); - + private ElasticsearchStatusException createUnlicensedError( + final String datafeedId, final RemoteClusterLicenseChecker.LicenseCheck licenseCheck) { + final String message = String.format( + Locale.ROOT, + "cannot start datafeed [%s] as it is configured to use indices on remote cluster [%s] that is not licensed for ml; %s", + datafeedId, + licenseCheck.remoteClusterLicenseInfo().clusterAlias(), + RemoteClusterLicenseChecker.buildErrorMessage( + "ml", + licenseCheck.remoteClusterLicenseInfo(), + RemoteClusterLicenseChecker::isLicensePlatinumOrTrial)); return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST); } - private ElasticsearchStatusException createUnknownLicenseError(String datafeedId, List remoteIndices, - Exception cause) { - String message = "Cannot start datafeed [" + datafeedId + "] as it is configured to use" - + " indices on a remote cluster " + remoteIndices - + " but the license type could not be verified"; - - return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, new Exception(cause.getMessage())); + private ElasticsearchStatusException createUnknownLicenseError( + final String datafeedId, final List remoteIndices, final Exception cause) { + final int numberOfRemoteClusters = RemoteClusterLicenseChecker.remoteClusterAliases(remoteIndices).size(); + assert numberOfRemoteClusters > 0; + final String remoteClusterQualifier = numberOfRemoteClusters == 1 ? "a remote cluster" : "remote clusters"; + final String licenseTypeQualifier = numberOfRemoteClusters == 1 ? "" : "s"; + final String message = String.format( + Locale.ROOT, + "cannot start datafeed [%s] as it uses indices on %s %s but the license type%s could not be verified", + datafeedId, + remoteClusterQualifier, + remoteIndices, + licenseTypeQualifier); + + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, cause); } public static class StartDatafeedPersistentTasksExecutor extends PersistentTasksExecutor { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java index a6be047648623..ce3f611b2227a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -92,7 +93,7 @@ private AssignmentFailure verifyIndicesActive(DatafeedConfig datafeed) { List indices = datafeed.getIndices(); for (String index : indices) { - if (MlRemoteLicenseChecker.isRemoteIndex(index)) { + if (RemoteClusterLicenseChecker.isRemoteIndex(index)) { // We cannot verify remote indices continue; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseChecker.java 
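For reference, the wording produced by the new createUnknownLicenseError can be reproduced with plain String.format over Locale.ROOT. The sketch below is standalone and is not taken from the patch; the datafeed id, index names, and cluster count are placeholders.

import java.util.Arrays;
import java.util.List;
import java.util.Locale;

public class UnknownLicenseMessageSketch {
    public static void main(String[] args) {
        // Placeholder inputs; in the action these come from the datafeed configuration.
        final String datafeedId = "my-datafeed";
        final List<String> remoteIndices = Arrays.asList("remote1:index-a", "remote2:index-b");
        // In the patch this count comes from RemoteClusterLicenseChecker.remoteClusterAliases(remoteIndices).size().
        final int numberOfRemoteClusters = 2;
        final String remoteClusterQualifier = numberOfRemoteClusters == 1 ? "a remote cluster" : "remote clusters";
        final String licenseTypeQualifier = numberOfRemoteClusters == 1 ? "" : "s";
        final String message = String.format(
                Locale.ROOT,
                "cannot start datafeed [%s] as it uses indices on %s %s but the license type%s could not be verified",
                datafeedId,
                remoteClusterQualifier,
                remoteIndices,
                licenseTypeQualifier);
        // Prints: cannot start datafeed [my-datafeed] as it uses indices on remote clusters
        // [remote1:index-a, remote2:index-b] but the license types could not be verified
        System.out.println(message);
    }
}

Formatting with Locale.ROOT keeps the message stable regardless of the node's default locale.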
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseChecker.java deleted file mode 100644 index b0eeed2c800ec..0000000000000 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseChecker.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.ml.datafeed; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.license.License; -import org.elasticsearch.protocol.xpack.XPackInfoRequest; -import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.protocol.xpack.license.LicenseStatus; -import org.elasticsearch.transport.ActionNotFoundTransportException; -import org.elasticsearch.transport.RemoteClusterAware; -import org.elasticsearch.xpack.core.action.XPackInfoAction; - -import java.util.EnumSet; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; - -/** - * ML datafeeds can use cross cluster search to access data in a remote cluster. - * The remote cluster should be licenced for ML this class performs that check - * using the _xpack (info) endpoint. - */ -public class MlRemoteLicenseChecker { - - private final Client client; - - public static class RemoteClusterLicenseInfo { - private final String clusterName; - private final XPackInfoResponse.LicenseInfo licenseInfo; - - RemoteClusterLicenseInfo(String clusterName, XPackInfoResponse.LicenseInfo licenseInfo) { - this.clusterName = clusterName; - this.licenseInfo = licenseInfo; - } - - public String getClusterName() { - return clusterName; - } - - public XPackInfoResponse.LicenseInfo getLicenseInfo() { - return licenseInfo; - } - } - - public class LicenseViolation { - private final RemoteClusterLicenseInfo licenseInfo; - - private LicenseViolation(@Nullable RemoteClusterLicenseInfo licenseInfo) { - this.licenseInfo = licenseInfo; - } - - public boolean isViolated() { - return licenseInfo != null; - } - - public RemoteClusterLicenseInfo get() { - return licenseInfo; - } - } - - public MlRemoteLicenseChecker(Client client) { - this.client = client; - } - - /** - * Check each cluster is licensed for ML. - * This function evaluates lazily and will terminate when the first cluster - * that is not licensed is found or an error occurs. 
- * - * @param clusterNames List of remote cluster names - * @param listener Response listener - */ - public void checkRemoteClusterLicenses(List clusterNames, ActionListener listener) { - final Iterator itr = clusterNames.iterator(); - if (itr.hasNext() == false) { - listener.onResponse(new LicenseViolation(null)); - return; - } - - final AtomicReference clusterName = new AtomicReference<>(itr.next()); - - ActionListener infoListener = new ActionListener() { - @Override - public void onResponse(XPackInfoResponse xPackInfoResponse) { - if (licenseSupportsML(xPackInfoResponse.getLicenseInfo()) == false) { - listener.onResponse(new LicenseViolation( - new RemoteClusterLicenseInfo(clusterName.get(), xPackInfoResponse.getLicenseInfo()))); - return; - } - - if (itr.hasNext()) { - clusterName.set(itr.next()); - remoteClusterLicense(clusterName.get(), this); - } else { - listener.onResponse(new LicenseViolation(null)); - } - } - - @Override - public void onFailure(Exception e) { - String message = "Could not determine the X-Pack licence type for cluster [" + clusterName.get() + "]"; - if (e instanceof ActionNotFoundTransportException) { - // This is likely to be because x-pack is not installed in the target cluster - message += ". Is X-Pack installed on the target cluster?"; - } - listener.onFailure(new ElasticsearchException(message, e)); - } - }; - - remoteClusterLicense(clusterName.get(), infoListener); - } - - private void remoteClusterLicense(String clusterName, ActionListener listener) { - Client remoteClusterClient = client.getRemoteClusterClient(clusterName); - ThreadContext threadContext = remoteClusterClient.threadPool().getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - // we stash any context here since this is an internal execution and should not leak any - // existing context information. - threadContext.markAsSystemContext(); - - XPackInfoRequest request = new XPackInfoRequest(); - request.setCategories(EnumSet.of(XPackInfoRequest.Category.LICENSE)); - remoteClusterClient.execute(XPackInfoAction.INSTANCE, request, listener); - } - } - - static boolean licenseSupportsML(XPackInfoResponse.LicenseInfo licenseInfo) { - License.OperationMode mode = License.OperationMode.resolve(licenseInfo.getMode()); - return licenseInfo.getStatus() == LicenseStatus.ACTIVE && - (mode == License.OperationMode.PLATINUM || mode == License.OperationMode.TRIAL); - } - - public static boolean isRemoteIndex(String index) { - return index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR) != -1; - } - - public static boolean containsRemoteIndex(List indices) { - return indices.stream().anyMatch(MlRemoteLicenseChecker::isRemoteIndex); - } - - /** - * Get any remote indices used in cross cluster search. - * Remote indices are of the form {@code cluster_name:index_name} - * @return List of remote cluster indices - */ - public static List remoteIndices(List indices) { - return indices.stream().filter(MlRemoteLicenseChecker::isRemoteIndex).collect(Collectors.toList()); - } - - /** - * Extract the list of remote cluster names from the list of indices. - * @param indices List of indices. 
Remote cluster indices are prefixed - * with {@code cluster-name:} - * @return Every cluster name found in {@code indices} - */ - public static List remoteClusterNames(List indices) { - return indices.stream() - .filter(MlRemoteLicenseChecker::isRemoteIndex) - .map(index -> index.substring(0, index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR))) - .distinct() - .collect(Collectors.toList()); - } - - public static String buildErrorMessage(RemoteClusterLicenseInfo clusterLicenseInfo) { - StringBuilder error = new StringBuilder(); - if (clusterLicenseInfo.licenseInfo.getStatus() != LicenseStatus.ACTIVE) { - error.append("The license on cluster [").append(clusterLicenseInfo.clusterName) - .append("] is not active. "); - } else { - License.OperationMode mode = License.OperationMode.resolve(clusterLicenseInfo.licenseInfo.getMode()); - if (mode != License.OperationMode.PLATINUM && mode != License.OperationMode.TRIAL) { - error.append("The license mode [").append(mode) - .append("] on cluster [") - .append(clusterLicenseInfo.clusterName) - .append("] does not enable Machine Learning. "); - } - } - - error.append(Strings.toString(clusterLicenseInfo.licenseInfo)); - return error.toString(); - } -} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParams.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParams.java index 127fb18e5fff4..2d338890f9fa4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParams.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParams.java @@ -66,6 +66,7 @@ public static UpdateParams fromJobUpdate(JobUpdate jobUpdate) { return new Builder(jobUpdate.getJobId()) .modelPlotConfig(jobUpdate.getModelPlotConfig()) .detectorUpdates(jobUpdate.getDetectorUpdates()) + .updateScheduledEvents(jobUpdate.getGroups() != null) .build(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestCloseJobAction.java index fc0638048ce98..ae6257d53850f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestCloseJobAction.java @@ -12,10 +12,10 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.CloseJobAction.Request; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.ml.MachineLearning; import java.io.IOException; @@ -34,16 +34,21 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - Request request = new Request(restRequest.param(Job.ID.getPreferredName())); - if (restRequest.hasParam(Request.TIMEOUT.getPreferredName())) { - request.setCloseTimeout(TimeValue.parseTimeValue( + Request request; + if (restRequest.hasContentOrSourceParam()) { + request = Request.parseRequest(restRequest.param(Job.ID.getPreferredName()), restRequest.contentParser()); + } else { + request = new Request(restRequest.param(Job.ID.getPreferredName())); + if 
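The deleted checker's remoteClusterNames logic (now remoteClusterAliases on the shared RemoteClusterLicenseChecker) amounts to splitting qualified index names on the cluster separator. Below is a minimal standalone sketch of that parsing, assuming the usual ':' separator and made-up index names; it is illustrative only and not part of the patch.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class RemoteIndexNamesSketch {
    // Mirrors RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR.
    private static final char SEPARATOR = ':';

    static boolean isRemoteIndex(String index) {
        return index.indexOf(SEPARATOR) != -1;
    }

    static List<String> remoteClusterAliases(List<String> indices) {
        return indices.stream()
                .filter(RemoteIndexNamesSketch::isRemoteIndex)
                .map(index -> index.substring(0, index.indexOf(SEPARATOR)))
                .distinct()
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<String> indices = Arrays.asList("local-index", "remote1:index-a", "remote2:index-b", "remote2:index-c");
        System.out.println(remoteClusterAliases(indices)); // [remote1, remote2]
    }
}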
(restRequest.hasParam(Request.TIMEOUT.getPreferredName())) { + request.setCloseTimeout(TimeValue.parseTimeValue( restRequest.param(Request.TIMEOUT.getPreferredName()), Request.TIMEOUT.getPreferredName())); - } - if (restRequest.hasParam(Request.FORCE.getPreferredName())) { - request.setForce(restRequest.paramAsBoolean(Request.FORCE.getPreferredName(), request.isForce())); - } - if (restRequest.hasParam(Request.ALLOW_NO_JOBS.getPreferredName())) { - request.setAllowNoJobs(restRequest.paramAsBoolean(Request.ALLOW_NO_JOBS.getPreferredName(), request.allowNoJobs())); + } + if (restRequest.hasParam(Request.FORCE.getPreferredName())) { + request.setForce(restRequest.paramAsBoolean(Request.FORCE.getPreferredName(), request.isForce())); + } + if (restRequest.hasParam(Request.ALLOW_NO_JOBS.getPreferredName())) { + request.setAllowNoJobs(restRequest.paramAsBoolean(Request.ALLOW_NO_JOBS.getPreferredName(), request.allowNoJobs())); + } } return channel -> client.execute(CloseJobAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 02bfb1b326fd9..5bf8fb6956bfe 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -423,33 +423,6 @@ public void testSelectLeastLoadedMlNode_noCompatibleJobTypeNodes() { assertNull(result.getExecutorNode()); } - public void testSelectLeastLoadedMlNode_noNodesPriorTo_V_5_5() { - Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); - DiscoveryNodes nodes = DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), Version.V_5_4_0)) - .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), Version.V_5_4_0)) - .build(); - - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("incompatible_type_job", "_node_id1", null, tasksBuilder); - PersistentTasksCustomMetaData tasks = tasksBuilder.build(); - - ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); - MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addJobAndIndices(metaData, routingTable, "incompatible_type_job"); - cs.nodes(nodes); - metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); - cs.metaData(metaData); - cs.routingTable(routingTable.build()); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("incompatible_type_job", cs.build(), 2, 10, 30, logger); - assertThat(result.getExplanation(), containsString("because this node does not support jobs of version [" + Version.CURRENT + "]")); - assertNull(result.getExecutorNode()); - } - public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() { Map nodeAttr = new HashMap<>(); nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); @@ -606,12 +579,6 @@ public void testMappingRequiresUpdateMaliciousMappingVersion() throws IOExceptio assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); } - public 
void testMappingRequiresUpdateOldMappingVersion() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_54", Version.V_5_4_0.toString())); - String[] indices = new String[] { "version_54" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - public void testMappingRequiresUpdateBogusMappingVersion() throws IOException { ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_bogus", "0.0")); String[] indices = new String[] { "version_bogus" }; @@ -632,21 +599,6 @@ public void testMappingRequiresUpdateNewerMappingVersionMinor() throws IOExcepti TransportOpenJobAction.mappingRequiresUpdate(cs, indices, VersionUtils.getPreviousMinorVersion(), logger)); } - public void testMappingRequiresUpdateSomeVersionMix() throws IOException { - Map versionMix = new HashMap<>(); - versionMix.put("version_54", Version.V_5_4_0); - versionMix.put("version_current", Version.CURRENT); - versionMix.put("version_null", null); - versionMix.put("version_current2", Version.CURRENT); - versionMix.put("version_bogus", "0.0.0"); - versionMix.put("version_current3", Version.CURRENT); - versionMix.put("version_bogus2", "0.0.0"); - - ClusterState cs = getClusterStateWithMappingsWithMetaData(versionMix); - String[] indices = new String[] { "version_54", "version_null", "version_bogus", "version_bogus2" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - public void testNodeNameAndVersion() { TransportAddress ta = new TransportAddress(InetAddress.getLoopbackAddress(), 9300); Map attributes = new HashMap<>(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java index 72c8d361dd882..610a5c1b92fb6 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java @@ -3,10 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ + package org.elasticsearch.xpack.ml.action; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MlMetadata; @@ -14,7 +16,6 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; import org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedStateTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedStateTests.java index 8b3e68b1e5714..32699f60cbdb9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedStateTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedStateTests.java @@ -5,19 +5,8 @@ */ package org.elasticsearch.xpack.ml.datafeed; -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.mockito.ArgumentCaptor; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - -import java.io.IOException; - -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class DatafeedStateTests extends ESTestCase { @@ -37,35 +26,4 @@ public void testValidOrdinals() { assertEquals(2, DatafeedState.STARTING.ordinal()); assertEquals(3, DatafeedState.STOPPING.ordinal()); } - - @SuppressWarnings("unchecked") - public void testStreaming_v54BackwardsCompatibility() throws IOException { - StreamOutput out = mock(StreamOutput.class); - when(out.getVersion()).thenReturn(Version.V_5_4_0); - ArgumentCaptor enumCaptor = ArgumentCaptor.forClass(Enum.class); - - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) { - return null; - } - }).when(out).writeEnum(enumCaptor.capture()); - - // STARTING & STOPPING states were introduced in v5.5. - // Pre v5.5 STARTING translated as STOPPED - DatafeedState.STARTING.writeTo(out); - assertEquals(DatafeedState.STOPPED, enumCaptor.getValue()); - - // Pre v5.5 STOPPING means the datafeed is STARTED - DatafeedState.STOPPING.writeTo(out); - assertEquals(DatafeedState.STARTED, enumCaptor.getValue()); - - // POST 5.5 enums a written as is - when(out.getVersion()).thenReturn(Version.V_5_5_0); - - DatafeedState.STARTING.writeTo(out); - assertEquals(DatafeedState.STARTING, enumCaptor.getValue()); - DatafeedState.STOPPING.writeTo(out); - assertEquals(DatafeedState.STOPPING, enumCaptor.getValue()); - } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java deleted file mode 100644 index 81e4c75cfad7c..0000000000000 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.ml.datafeed; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.protocol.xpack.license.LicenseStatus; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.action.XPackInfoAction; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.is; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.same; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class MlRemoteLicenseCheckerTests extends ESTestCase { - - public void testIsRemoteIndex() { - List indices = Arrays.asList("local-index1", "local-index2"); - assertFalse(MlRemoteLicenseChecker.containsRemoteIndex(indices)); - indices = Arrays.asList("local-index1", "remote-cluster:remote-index2"); - assertTrue(MlRemoteLicenseChecker.containsRemoteIndex(indices)); - } - - public void testRemoteIndices() { - List indices = Collections.singletonList("local-index"); - assertThat(MlRemoteLicenseChecker.remoteIndices(indices), is(empty())); - indices = Arrays.asList("local-index", "remote-cluster:index1", "local-index2", "remote-cluster2:index1"); - assertThat(MlRemoteLicenseChecker.remoteIndices(indices), containsInAnyOrder("remote-cluster:index1", "remote-cluster2:index1")); - } - - public void testRemoteClusterNames() { - List indices = Arrays.asList("local-index1", "local-index2"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), empty()); - indices = Arrays.asList("local-index1", "remote-cluster1:remote-index2"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), contains("remote-cluster1")); - indices = Arrays.asList("remote-cluster1:index2", "index1", "remote-cluster2:index1"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), contains("remote-cluster1", "remote-cluster2")); - indices = Arrays.asList("remote-cluster1:index2", "index1", "remote-cluster2:index1", "remote-cluster2:index2"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), contains("remote-cluster1", "remote-cluster2")); - } - - public void testLicenseSupportsML() { - XPackInfoResponse.LicenseInfo licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "trial", "trial", - LicenseStatus.ACTIVE, randomNonNegativeLong()); - assertTrue(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - - licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "trial", "trial", LicenseStatus.EXPIRED, randomNonNegativeLong()); - assertFalse(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - - 
licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "GOLD", "GOLD", LicenseStatus.ACTIVE, randomNonNegativeLong()); - assertFalse(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - - licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.ACTIVE, randomNonNegativeLong()); - assertTrue(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - } - - public void testCheckRemoteClusterLicenses_givenValidLicenses() { - final AtomicInteger index = new AtomicInteger(0); - final List responses = new ArrayList<>(); - - Client client = createMockClient(); - doAnswer(invocationMock -> { - @SuppressWarnings("raw_types") - ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; - listener.onResponse(responses.get(index.getAndIncrement())); - return null; - }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); - - - List remoteClusterNames = Arrays.asList("valid1", "valid2", "valid3"); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - - MlRemoteLicenseChecker licenseChecker = new MlRemoteLicenseChecker(client); - AtomicReference licCheckResponse = new AtomicReference<>(); - - licenseChecker.checkRemoteClusterLicenses(remoteClusterNames, - new ActionListener() { - @Override - public void onResponse(MlRemoteLicenseChecker.LicenseViolation response) { - licCheckResponse.set(response); - } - - @Override - public void onFailure(Exception e) { - fail(e.getMessage()); - } - }); - - verify(client, times(3)).execute(same(XPackInfoAction.INSTANCE), any(), any()); - assertNotNull(licCheckResponse.get()); - assertFalse(licCheckResponse.get().isViolated()); - assertNull(licCheckResponse.get().get()); - } - - public void testCheckRemoteClusterLicenses_givenInvalidLicense() { - final AtomicInteger index = new AtomicInteger(0); - List remoteClusterNames = Arrays.asList("good", "cluster-with-basic-license", "good2"); - final List responses = new ArrayList<>(); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createBasicLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - - Client client = createMockClient(); - doAnswer(invocationMock -> { - @SuppressWarnings("raw_types") - ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; - listener.onResponse(responses.get(index.getAndIncrement())); - return null; - }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); - - MlRemoteLicenseChecker licenseChecker = new MlRemoteLicenseChecker(client); - AtomicReference licCheckResponse = new AtomicReference<>(); - - licenseChecker.checkRemoteClusterLicenses(remoteClusterNames, - new ActionListener() { - @Override - public void onResponse(MlRemoteLicenseChecker.LicenseViolation response) { - licCheckResponse.set(response); - } - - @Override - public void onFailure(Exception e) { - fail(e.getMessage()); - } - }); - - verify(client, times(2)).execute(same(XPackInfoAction.INSTANCE), any(), any()); - assertNotNull(licCheckResponse.get()); - assertTrue(licCheckResponse.get().isViolated()); - assertEquals("cluster-with-basic-license", licCheckResponse.get().get().getClusterName()); - assertEquals("BASIC", licCheckResponse.get().get().getLicenseInfo().getType()); - } - - public 
void testBuildErrorMessage() { - XPackInfoResponse.LicenseInfo platinumLicence = createPlatinumLicenseResponse(); - MlRemoteLicenseChecker.RemoteClusterLicenseInfo info = - new MlRemoteLicenseChecker.RemoteClusterLicenseInfo("platinum-cluster", platinumLicence); - assertEquals(Strings.toString(platinumLicence), MlRemoteLicenseChecker.buildErrorMessage(info)); - - XPackInfoResponse.LicenseInfo basicLicense = createBasicLicenseResponse(); - info = new MlRemoteLicenseChecker.RemoteClusterLicenseInfo("basic-cluster", basicLicense); - String expected = "The license mode [BASIC] on cluster [basic-cluster] does not enable Machine Learning. " - + Strings.toString(basicLicense); - assertEquals(expected, MlRemoteLicenseChecker.buildErrorMessage(info)); - - XPackInfoResponse.LicenseInfo expiredLicense = createExpiredLicenseResponse(); - info = new MlRemoteLicenseChecker.RemoteClusterLicenseInfo("expired-cluster", expiredLicense); - expected = "The license on cluster [expired-cluster] is not active. " + Strings.toString(expiredLicense); - assertEquals(expected, MlRemoteLicenseChecker.buildErrorMessage(info)); - } - - private Client createMockClient() { - Client client = mock(Client.class); - ThreadPool threadPool = mock(ThreadPool.class); - when(client.threadPool()).thenReturn(threadPool); - when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); - when(client.getRemoteClusterClient(anyString())).thenReturn(client); - return client; - } - - private XPackInfoResponse.LicenseInfo createPlatinumLicenseResponse() { - return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.ACTIVE, randomNonNegativeLong()); - } - - private XPackInfoResponse.LicenseInfo createBasicLicenseResponse() { - return new XPackInfoResponse.LicenseInfo("uid", "BASIC", "BASIC", LicenseStatus.ACTIVE, randomNonNegativeLong()); - } - - private XPackInfoResponse.LicenseInfo createExpiredLicenseResponse() { - return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.EXPIRED, randomNonNegativeLong()); - } -} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java index 7ed00a01f3565..c83521591fcf4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java @@ -392,7 +392,7 @@ public void testResetScrollAfterSearchPhaseExecutionException() throws IOExcepti assertThat(extractor.hasNext(), is(true)); output = extractor.next(); assertThat(output.isPresent(), is(true)); - assertEquals(new Long(1400L), extractor.getLastTimestamp()); + assertEquals(Long.valueOf(1400L), extractor.getLastTimestamp()); // A second failure is not tolerated assertThat(extractor.hasNext(), is(true)); expectThrows(SearchPhaseExecutionException.class, extractor::next); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java index 303fce3d81321..e36c313b626c9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java @@ -125,7 
+125,7 @@ public void testGetCalandarByJobId() throws Exception { Long matchedCount = queryResult.stream().filter( c -> c.getId().equals("foo calendar") || c.getId().equals("foo bar calendar") || c.getId().equals("cat foo calendar")) .count(); - assertEquals(new Long(3), matchedCount); + assertEquals(Long.valueOf(3), matchedCount); queryResult = getCalendars("bar"); assertThat(queryResult, hasSize(1)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobStateTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobStateTests.java index cd983c6b0302b..2e324b6a1c201 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobStateTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobStateTests.java @@ -5,19 +5,8 @@ */ package org.elasticsearch.xpack.ml.job.config; -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.mockito.ArgumentCaptor; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - -import java.io.IOException; - -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class JobStateTests extends ESTestCase { @@ -60,35 +49,4 @@ public void testIsAnyOf() { assertTrue(JobState.CLOSED.isAnyOf(JobState.CLOSED)); assertTrue(JobState.CLOSING.isAnyOf(JobState.CLOSING)); } - - @SuppressWarnings("unchecked") - public void testStreaming_v54BackwardsCompatibility() throws IOException { - StreamOutput out = mock(StreamOutput.class); - when(out.getVersion()).thenReturn(Version.V_5_4_0); - ArgumentCaptor enumCaptor = ArgumentCaptor.forClass(Enum.class); - - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) { - return null; - } - }).when(out).writeEnum(enumCaptor.capture()); - - // OPENING state was introduced in v5.5. - // Pre v5.5 its translated as CLOSED - JobState.OPENING.writeTo(out); - assertEquals(JobState.CLOSED, enumCaptor.getValue()); - - when(out.getVersion()).thenReturn(Version.V_5_5_0); - - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) { - return null; - } - }).when(out).writeEnum(enumCaptor.capture()); - - JobState.OPENING.writeTo(out); - assertEquals(JobState.OPENING, enumCaptor.getValue()); - } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParamsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParamsTests.java new file mode 100644 index 0000000000000..2683c1131f5bf --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParamsTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.job.process.autodetect; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; +import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; +import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; +import org.elasticsearch.xpack.core.ml.job.config.Operator; +import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + + +public class UpdateParamsTests extends ESTestCase { + + public void testFromJobUpdate() { + String jobId = "foo"; + DetectionRule rule = new DetectionRule.Builder(Arrays.asList( + new RuleCondition(RuleCondition.AppliesTo.ACTUAL, + Operator.GT, 1.0))).build(); + List rules = Arrays.asList(rule); + List detectorUpdates = Collections.singletonList( + new JobUpdate.DetectorUpdate(2, null, rules)); + JobUpdate.Builder updateBuilder = new JobUpdate.Builder(jobId) + .setModelPlotConfig(new ModelPlotConfig()) + .setDetectorUpdates(detectorUpdates); + + UpdateParams params = UpdateParams.fromJobUpdate(updateBuilder.build()); + + assertFalse(params.isUpdateScheduledEvents()); + assertEquals(params.getDetectorUpdates(), updateBuilder.build().getDetectorUpdates()); + assertEquals(params.getModelPlotConfig(), updateBuilder.build().getModelPlotConfig()); + + params = UpdateParams.fromJobUpdate(updateBuilder.setGroups(Arrays.asList("bar")).build()); + + assertTrue(params.isUpdateScheduledEvents()); + } + +} diff --git a/x-pack/plugin/monitoring/build.gradle b/x-pack/plugin/monitoring/build.gradle index a452ef09a20ff..e551d577b7bbd 100644 --- a/x-pack/plugin/monitoring/build.gradle +++ b/x-pack/plugin/monitoring/build.gradle @@ -13,7 +13,8 @@ esplugin { archivesBaseName = 'x-pack-monitoring' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // monitoring deps diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java index f23a545b6a5ff..ded3064a2a66f 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java @@ -5,14 +5,14 @@ */ package org.elasticsearch.xpack.monitoring.exporter.http; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; +import org.apache.http.nio.entity.NByteArrayEntity; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseListener; import org.elasticsearch.client.RestClient; @@ -94,9 +94,13 @@ public void doFlush(ActionListener listener) throws ExportException { if (payload == null) { listener.onFailure(new ExportException("unable to send documents 
because none were loaded for export bulk [{}]", name)); } else if (payload.length != 0) { - final HttpEntity body = new ByteArrayEntity(payload, ContentType.APPLICATION_JSON); + final Request request = new Request("POST", "/_bulk"); + for (Map.Entry param : params.entrySet()) { + request.addParameter(param.getKey(), param.getValue()); + } + request.setEntity(new NByteArrayEntity(payload, ContentType.APPLICATION_JSON)); - client.performRequestAsync("POST", "/_bulk", params, body, new ResponseListener() { + client.performRequestAsync(request, new ResponseListener() { @Override public void onSuccess(Response response) { try { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json index c0a13ea63a640..e1f418d5a8d71 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json @@ -145,7 +145,7 @@ }, "transform": { "script": { - "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def state = ctx.payload.check.hits.hits[0]._source.cluster_state.status;if (ctx.vars.not_resolved){ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check == false) {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = ['timestamp': ctx.execution_time, 'metadata': ctx.metadata.xpack];}if (ctx.vars.fails_check) {ctx.payload.prefix = 'Elasticsearch cluster status is ' + state + '.';if (state == 'red') {ctx.payload.message = 'Allocate missing primary shards and replica shards.';ctx.payload.metadata.severity = 2100;} else {ctx.payload.message = 'Allocate missing replica shards.';ctx.payload.metadata.severity = 1100;}}ctx.vars.state = state.toUpperCase();ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" + "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? 
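HttpExportBulk now builds a Request object for the low-level REST client instead of passing the method, endpoint, parameters, and entity as separate arguments. A minimal standalone sketch of that calling pattern follows; the endpoint parameters and payload are placeholders and the response handling is only stubbed out.

import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NByteArrayEntity;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseListener;
import org.elasticsearch.client.RestClient;

import java.util.Map;

public class BulkRequestSketch {
    static void sendBulk(RestClient client, Map<String, String> params, byte[] payload) {
        // Build the request object, then attach query parameters and the JSON body.
        Request request = new Request("POST", "/_bulk");
        for (Map.Entry<String, String> param : params.entrySet()) {
            request.addParameter(param.getKey(), param.getValue());
        }
        request.setEntity(new NByteArrayEntity(payload, ContentType.APPLICATION_JSON));
        client.performRequestAsync(request, new ResponseListener() {
            @Override
            public void onSuccess(Response response) {
                // handle the bulk response
            }

            @Override
            public void onFailure(Exception exception) {
                // surface the failure to the caller
            }
        });
    }
}

This matches the new code in the patch; the older overloads that took the method, endpoint, parameter map, and entity directly were deprecated once the Request class was introduced in the client.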
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def state = ctx.payload.check.hits.hits[0]._source.cluster_state.status;if (ctx.vars.not_resolved){ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check == false) {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = ['timestamp': ctx.execution_time, 'metadata': ctx.metadata.xpack];}if (ctx.vars.fails_check) {ctx.payload.prefix = 'Elasticsearch cluster status is ' + state + '.';if (state == 'red') {ctx.payload.message = 'Allocate missing primary shards and replica shards.';ctx.payload.metadata.severity = 2100;} else {ctx.payload.message = 'Allocate missing replica shards.';ctx.payload.metadata.severity = 1100;}}ctx.vars.state = state.toUpperCase();ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" } }, "actions": { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json index a6bf7b6145ce8..5c0cb7f55b4e5 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json @@ -151,7 +151,7 @@ }, "transform": { "script": { - "source": "void formatResults(StringBuilder message, String type, Map typeMap) {if (typeMap.empty == false) {message.append(' Node');if (typeMap.size() != 1) {message.append('s were');} else {message.append(' was');}message.append(' ').append(type).append(' [').append(typeMap.size()).append(']: ').append(typeMap.values().stream().collect(Collectors.joining(', ', '[', ']'))).append('.');}}ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? 
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;def clusterState = ctx.payload.check.hits.hits[0]._source.cluster_state;def persistentUuidToName = [:];def latestNodes = clusterState.nodes;def ephemeralUuidToPersistentUuid = [:];def payload = ['timestamp': ctx.execution_time,'updated_timestamp': ctx.execution_time,'resolved_timestamp': ctx.execution_time,'metadata': ctx.metadata.xpack,'prefix': 'Elasticsearch cluster nodes have changed!','nodes': ['hash': clusterState.nodes_hash,'added': persistentUuidToName,'removed': [:],'restarted': [:]]];for (def latestNode : latestNodes.entrySet()) {persistentUuidToName[latestNode.key] = latestNode.value.name;ephemeralUuidToPersistentUuid[latestNode.value.ephemeral_id] = latestNode.key;}def previousNodes = ctx.payload.check.hits.hits[1]._source.cluster_state.nodes;def previousPersistentUuidToName = [:];for (def previousNode : previousNodes.entrySet()){if (persistentUuidToName.containsKey(previousNode.key) == false){payload.nodes.removed[previousNode.key] = previousNode.value.name;}else{if (ephemeralUuidToPersistentUuid.containsKey(previousNode.value.ephemeral_id) == false) {payload.nodes.restarted[previousNode.key] = persistentUuidToName[previousNode.key];}persistentUuidToName.remove(previousNode.key);}}StringBuilder message = new StringBuilder();formatResults(message, 'removed', payload.nodes.removed);formatResults(message, 'added', payload.nodes.added);formatResults(message, 'restarted', payload.nodes.restarted);payload.message = message.toString().trim();return payload;" + "source": "void formatResults(StringBuilder message, String type, Map typeMap) {if (typeMap.empty == false) {message.append(' Node');if (typeMap.size() != 1) {message.append('s were');} else {message.append(' was');}message.append(' ').append(type).append(' [').append(typeMap.size()).append(']: ').append(typeMap.values().stream().collect(Collectors.joining(', ', '[', ']'))).append('.');}}ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? 
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;def clusterState = ctx.payload.check.hits.hits[0]._source.cluster_state;def persistentUuidToName = [:];def latestNodes = clusterState.nodes;def ephemeralUuidToPersistentUuid = [:];def payload = ['timestamp': ctx.execution_time,'updated_timestamp': ctx.execution_time,'resolved_timestamp': ctx.execution_time,'metadata': ctx.metadata.xpack,'prefix': 'Elasticsearch cluster nodes have changed!','nodes': ['hash': clusterState.nodes_hash,'added': persistentUuidToName,'removed': [:],'restarted': [:]]];for (def latestNode : latestNodes.entrySet()) {persistentUuidToName[latestNode.key] = latestNode.value.name;ephemeralUuidToPersistentUuid[latestNode.value.ephemeral_id] = latestNode.key;}def previousNodes = ctx.payload.check.hits.hits[1]._source.cluster_state.nodes;def previousPersistentUuidToName = [:];for (def previousNode : previousNodes.entrySet()){if (persistentUuidToName.containsKey(previousNode.key) == false){payload.nodes.removed[previousNode.key] = previousNode.value.name;}else{if (ephemeralUuidToPersistentUuid.containsKey(previousNode.value.ephemeral_id) == false) {payload.nodes.restarted[previousNode.key] = persistentUuidToName[previousNode.key];}persistentUuidToName.remove(previousNode.key);}}StringBuilder message = new StringBuilder();formatResults(message, 'removed', payload.nodes.removed);formatResults(message, 'added', payload.nodes.added);formatResults(message, 'restarted', payload.nodes.restarted);payload.message = message.toString().trim();return payload;" } }, "actions": { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json index 7e18c981f0f1f..051a3a9d40921 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json @@ -141,7 +141,7 @@ }, "transform": { "script": { - "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {def versions = new ArrayList(ctx.payload.check.hits.hits[0]._source.cluster_stats.nodes.versions);Collections.sort(versions);versionMessage = 'Versions: [' + String.join(', ', versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Elasticsearch.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" + "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? 
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {def versions = new ArrayList(ctx.payload.check.hits.hits[0]._source.cluster_stats.nodes.versions);Collections.sort(versions);versionMessage = 'Versions: [' + String.join(', ', versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Elasticsearch.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" } }, "actions": { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json index bf2da3ffb1ddd..b2acba610e141 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json @@ -161,7 +161,7 @@ }, "transform": { "script": { - "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {versionMessage = 'Versions: [' + String.join(', ', ctx.vars.versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Kibana.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" + "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? 
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {versionMessage = 'Versions: [' + String.join(', ', ctx.vars.versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Kibana.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" } }, "actions": { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json index 71a0cfd46bfd1..cf1fdde606c7a 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json @@ -161,7 +161,7 @@ }, "transform": { "script": { - "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {versionMessage = 'Versions: [' + String.join(', ', ctx.vars.versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Logstash.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" + "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? 
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {versionMessage = 'Versions: [' + String.join(', ', ctx.vars.versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Logstash.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" } }, "actions": { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json index a05198a15eb9c..7eb0d59167dbd 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json @@ -134,7 +134,7 @@ }, "transform": { "script": { - "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def alertMessage = null;if (ctx.vars.fails_check) { alertMessage = 'Update your license.';} if (ctx.vars.not_resolved) { ctx.payload = ctx.payload.alert.hits.hits[0]._source;ctx.payload.metadata = ctx.metadata.xpack;if (ctx.vars.fails_check == false) { ctx.payload.resolved_timestamp = ctx.execution_time;} } else { ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster\\'s license is going to expire in {{#relativeTime}}metadata.time{{/relativeTime}} at {{#absoluteTime}}metadata.time{{/absoluteTime}}.', 'message': alertMessage, 'metadata': ctx.metadata.xpack ];} if (ctx.vars.fails_check) { ctx.payload.metadata.time = ctx.vars.expiry.toString();} ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" + "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? 
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def alertMessage = null;if (ctx.vars.fails_check) { alertMessage = 'Update your license.';} if (ctx.vars.not_resolved) { ctx.payload = ctx.payload.alert.hits.hits[0]._source;ctx.payload.metadata = ctx.metadata.xpack;if (ctx.vars.fails_check == false) { ctx.payload.resolved_timestamp = ctx.execution_time;} } else { ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster\\'s license is going to expire in {{#relativeTime}}metadata.time{{/relativeTime}} at {{#absoluteTime}}metadata.time{{/absoluteTime}}.', 'message': alertMessage, 'metadata': ctx.metadata.xpack ];} if (ctx.vars.fails_check) { ctx.payload.metadata.time = ctx.vars.expiry.toString();} ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" } }, "actions": { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java index 57106363bc199..dc294ef53de52 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java @@ -5,12 +5,10 @@ */ package org.elasticsearch.xpack.monitoring.action; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; @@ -21,7 +19,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Base64; import java.util.List; import static java.util.Collections.emptyList; @@ -158,23 +155,6 @@ public void testSerialization() throws IOException { } } - public void testSerializationBwc() throws IOException { - final byte[] data = Base64.getDecoder().decode("AQNtSWQBBTUuMS4yAAAAAQEEdHlwZQECaWQNeyJmb28iOiJiYXIifQAAAAAAAAAA"); - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - MonitoringBulkDoc bulkDoc = MonitoringBulkDoc.readFrom(in); - assertEquals(MonitoredSystem.UNKNOWN, bulkDoc.getSystem()); - assertEquals("type", bulkDoc.getType()); - assertEquals("id", bulkDoc.getId()); - assertEquals(0L, bulkDoc.getTimestamp()); - assertEquals(0L, bulkDoc.getIntervalMillis()); - assertEquals("{\"foo\":\"bar\"}", bulkDoc.getSource().utf8ToString()); - assertEquals(XContentType.JSON, bulkDoc.getXContentType()); - } - } - /** * Test that we allow strings to be "" because Logstash 5.2 - 5.3 would submit empty _id values for time-based documents */ diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java index b336b3c885310..dc5cad7c94fd4 100644 --- 
a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.monitoring.action; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -26,7 +25,6 @@ import java.util.Collection; import java.util.List; -import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -254,52 +252,6 @@ public void testSerialization() throws IOException { assertArrayEquals(originalBulkDocs, deserializedBulkDocs); } - public void testSerializationBwc() throws IOException { - final MonitoringBulkRequest originalRequest = new MonitoringBulkRequest(); - - final int numDocs = iterations(10, 30); - for (int i = 0; i < numDocs; i++) { - originalRequest.add(randomMonitoringBulkDoc()); - } - - final Version version = randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_0_0_rc1); - - final BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(version); - originalRequest.writeTo(out); - - final StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); - - final MonitoringBulkRequest deserializedRequest = new MonitoringBulkRequest(); - deserializedRequest.readFrom(in); - - assertThat(in.available(), equalTo(0)); - - final MonitoringBulkDoc[] originalBulkDocs = originalRequest.getDocs().toArray(new MonitoringBulkDoc[]{}); - final MonitoringBulkDoc[] deserializedBulkDocs = deserializedRequest.getDocs().toArray(new MonitoringBulkDoc[]{}); - - assertThat(originalBulkDocs.length, equalTo(deserializedBulkDocs.length)); - - for (int i = 0; i < originalBulkDocs.length; i++) { - final MonitoringBulkDoc original = originalBulkDocs[i]; - final MonitoringBulkDoc deserialized = deserializedBulkDocs[i]; - - assertThat(deserialized.getSystem(), equalTo(original.getSystem())); - assertThat(deserialized.getType(), equalTo(original.getType())); - assertThat(deserialized.getId(), equalTo(original.getId())); - assertThat(deserialized.getTimestamp(), equalTo(original.getTimestamp())); - assertThat(deserialized.getSource(), equalTo(original.getSource())); - assertThat(deserialized.getXContentType(), equalTo(original.getXContentType())); - - if (version.onOrAfter(Version.V_6_0_0_rc1)) { - assertThat(deserialized.getIntervalMillis(), equalTo(original.getIntervalMillis())); - } else { - assertThat(deserialized.getIntervalMillis(), equalTo(0L)); - } - } - } - /** * Return a {@link XContentType} supported by the Monitoring Bulk API (JSON or Smile) */ diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/CollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/CollectorTests.java index 79279faa6f405..3d1a0bf9adedb 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/CollectorTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/CollectorTests.java @@ -5,39 +5,11 @@ */ package org.elasticsearch.xpack.monitoring.collector; -import org.elasticsearch.Version; -import 
org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; - -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; public class CollectorTests extends ESTestCase { public void testConvertNullNode() { assertEquals(null, Collector.convertNode(randomNonNegativeLong(), null)); } - - public void testConvertNode() { - final String name = randomBoolean() ? randomAlphaOfLength(5) : ""; - final String nodeId = randomAlphaOfLength(5); - final TransportAddress address = buildNewFakeTransportAddress(); - final Version version = randomFrom(Version.V_5_0_1, Version.V_5_3_0, Version.CURRENT); - final long timestamp = randomNonNegativeLong(); - - final Set roles = new HashSet<>(); - if (randomBoolean()) { - roles.addAll(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))); - } - - final MonitoringDoc.Node expectedNode = new MonitoringDoc.Node(nodeId, address.address().getHostString(), address.toString(), - address.getAddress(), name, timestamp); - - DiscoveryNode discoveryNode = new DiscoveryNode(name, nodeId, address, Collections.emptyMap(), roles, version); - assertEquals(expectedNode, Collector.convertNode(timestamp, discoveryNode)); - } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BaseMonitoringDocTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BaseMonitoringDocTestCase.java index 513ee3bdbb66b..46ba34dcd1a50 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BaseMonitoringDocTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BaseMonitoringDocTestCase.java @@ -5,12 +5,10 @@ */ package org.elasticsearch.xpack.monitoring.exporter; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -31,14 +29,12 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Base64; import java.util.List; import java.util.Map; import static java.util.Collections.emptyList; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; -import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -273,22 +269,4 @@ public void testMonitoringNodeSerialization() throws IOException { assertEquals(deserialized.hashCode(), original.hashCode()); assertNotSame(deserialized, original); } - - public void testMonitoringNodeBwcSerialization() throws IOException { - final Version version = randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_0_0_beta2); - - final byte[] data = Base64.getDecoder() - 
.decode("AQVFSWJKdgEDdFFOAQV3cGtMagEFa2xqeWEBBVZTamF2AwVrZXkjMgEyBWtleSMxATEFa2V5IzABMAAAAAAAAA=="); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - - final MonitoringDoc.Node node = new MonitoringDoc.Node(in); - assertEquals("EIbJv", node.getUUID()); - assertEquals("VSjav", node.getName()); - assertEquals("tQN", node.getHost()); - assertEquals("wpkLj", node.getTransportAddress()); - assertEquals("kljya", node.getIp()); - assertEquals(0L, node.getTimestamp()); - } - } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java index efc32fccb3dda..6669c796018e1 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java @@ -67,6 +67,7 @@ import static org.elasticsearch.threadpool.ThreadPool.Names.WRITE; import static org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils.TEMPLATE_VERSION; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -119,8 +120,10 @@ public void testMonitoringBulk() throws Exception { // REST is the realistic way that these operations happen, so it's the most realistic way to integration test it too // Use Monitoring Bulk API to index 3 documents - //final Response bulkResponse = getRestClient().performRequest("POST", "/_xpack/monitoring/_bulk", - // parameters, createBulkEntity()); + //final Request bulkRequest = new Request("POST", "/_xpack/monitoring/_bulk"); + //< + //bulkRequest.setJsonEntity(createBulkEntity()); + //final Response bulkResponse = getRestClient().performRequest(request); final MonitoringBulkResponse bulkResponse = new MonitoringBulkRequestBuilder(client()) @@ -186,7 +189,6 @@ public void testMonitoringBulk() throws Exception { * This test waits for the monitoring service to collect monitoring documents and then checks that all expected documents * have been indexed with the expected information. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29880") public void testMonitoringService() throws Exception { final boolean createAPMIndex = randomBoolean(); final String indexName = createAPMIndex ? 
"apm-2017.11.06" : "books"; @@ -337,7 +339,7 @@ private void assertClusterStatsMonitoringDoc(final Map document, final Map clusterStats = (Map) source.get("cluster_stats"); assertThat(clusterStats, notNullValue()); - assertThat(clusterStats.size(), equalTo(4)); + assertThat(clusterStats.size(), equalTo(5)); final Map stackStats = (Map) source.get("stack_stats"); assertThat(stackStats, notNullValue()); @@ -347,7 +349,7 @@ private void assertClusterStatsMonitoringDoc(final Map document, assertThat(apm, notNullValue()); assertThat(apm.size(), equalTo(1)); assertThat(apm.remove("found"), is(apmIndicesExist)); - assertThat(apm.isEmpty(), is(true)); + assertThat(apm.keySet(), empty()); final Map xpackStats = (Map) stackStats.get("xpack"); assertThat(xpackStats, notNullValue()); @@ -359,14 +361,14 @@ private void assertClusterStatsMonitoringDoc(final Map document, final Map clusterState = (Map) source.get("cluster_state"); assertThat(clusterState, notNullValue()); - assertThat(clusterState.size(), equalTo(6)); assertThat(clusterState.remove("nodes_hash"), notNullValue()); assertThat(clusterState.remove("status"), notNullValue()); assertThat(clusterState.remove("version"), notNullValue()); assertThat(clusterState.remove("state_uuid"), notNullValue()); + assertThat(clusterState.remove("cluster_uuid"), notNullValue()); assertThat(clusterState.remove("master_node"), notNullValue()); assertThat(clusterState.remove("nodes"), notNullValue()); - assertThat(clusterState.isEmpty(), is(true)); + assertThat(clusterState.keySet(), empty()); } /** @@ -452,6 +454,11 @@ private void assertNodeStatsMonitoringDoc(final Map document) { return; } + // bulk is not a thread pool in the current version but we allow it to support mixed version clusters + if (filter.startsWith("node_stats.thread_pool.bulk")) { + return; + } + assertThat(filter + " must not be null in the monitoring document", extractValue(filter, source), notNullValue()); }); } diff --git a/x-pack/plugin/rollup/build.gradle b/x-pack/plugin/rollup/build.gradle index 649a89bc2cdee..75fd22abacc58 100644 --- a/x-pack/plugin/rollup/build.gradle +++ b/x-pack/plugin/rollup/build.gradle @@ -16,7 +16,8 @@ compileTestJava.options.compilerArgs << "-Xlint:-rawtypes" dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java index 0fc4d838f7ce8..09b2ccd079a76 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java @@ -194,7 +194,7 @@ public List> getPersistentTasksExecutor(ClusterServic return emptyList(); } - SchedulerEngine schedulerEngine = new SchedulerEngine(getClock()); + SchedulerEngine schedulerEngine = new SchedulerEngine(settings, getClock()); return Collections.singletonList(new RollupJobTask.RollupJobPersistentTasksExecutor(settings, client, schedulerEngine, threadPool)); } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java 
b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java index d1706fd708e93..8537f2b6a38b4 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; @@ -17,7 +18,9 @@ import org.joda.time.DateTimeZone; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -32,6 +35,29 @@ public class RollupJobIdentifierUtils { private static final Comparator COMPARATOR = RollupJobIdentifierUtils.getComparator(); + public static final Map CALENDAR_ORDERING; + + static { + Map dateFieldUnits = new HashMap<>(16); + dateFieldUnits.put("year", 8); + dateFieldUnits.put("1y", 8); + dateFieldUnits.put("quarter", 7); + dateFieldUnits.put("1q", 7); + dateFieldUnits.put("month", 6); + dateFieldUnits.put("1M", 6); + dateFieldUnits.put("week", 5); + dateFieldUnits.put("1w", 5); + dateFieldUnits.put("day", 4); + dateFieldUnits.put("1d", 4); + dateFieldUnits.put("hour", 3); + dateFieldUnits.put("1h", 3); + dateFieldUnits.put("minute", 2); + dateFieldUnits.put("1m", 2); + dateFieldUnits.put("second", 1); + dateFieldUnits.put("1s", 1); + CALENDAR_ORDERING = Collections.unmodifiableMap(dateFieldUnits); + } + /** * Given the aggregation tree and a list of available job capabilities, this method will return a set * of the "best" jobs that should be searched. @@ -93,8 +119,9 @@ private static void checkDateHisto(DateHistogramAggregationBuilder source, List< if (fieldCaps != null) { for (Map agg : fieldCaps.getAggs()) { if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) { - TimeValue interval = TimeValue.parseTimeValue((String)agg.get(RollupField.INTERVAL), "date_histogram.interval"); - String thisTimezone = (String) agg.get(DateHistogramGroupConfig.TIME_ZONE); + DateHistogramInterval interval = new DateHistogramInterval((String)agg.get(RollupField.INTERVAL)); + + String thisTimezone = (String)agg.get(DateHistogramGroupConfig.TIME_ZONE); String sourceTimeZone = source.timeZone() == null ? DateTimeZone.UTC.toString() : source.timeZone().toString(); // Ensure we are working on the same timezone @@ -102,17 +129,20 @@ private static void checkDateHisto(DateHistogramAggregationBuilder source, List< continue; } if (source.dateHistogramInterval() != null) { - TimeValue sourceInterval = TimeValue.parseTimeValue(source.dateHistogramInterval().toString(), - "source.date_histogram.interval"); - //TODO should be divisor of interval - if (interval.compareTo(sourceInterval) <= 0) { + // Check if both are calendar and validate if they are. 
+ // If not, check if both are fixed and validate + if (validateCalendarInterval(source.dateHistogramInterval(), interval)) { + localCaps.add(cap); + } else if (validateFixedInterval(source.dateHistogramInterval(), interval)) { localCaps.add(cap); } } else { - if (interval.getMillis() <= source.interval()) { + // check if config is fixed and validate if it is + if (validateFixedInterval(source.interval(), interval)) { localCaps.add(cap); } } + // not a candidate if we get here break; } } @@ -133,6 +163,55 @@ private static void checkDateHisto(DateHistogramAggregationBuilder source, List< } } + private static boolean isCalendarInterval(DateHistogramInterval interval) { + return DateHistogramAggregationBuilder.DATE_FIELD_UNITS.containsKey(interval.toString()); + } + + static boolean validateCalendarInterval(DateHistogramInterval requestInterval, + DateHistogramInterval configInterval) { + // Both must be calendar intervals + if (isCalendarInterval(requestInterval) == false || isCalendarInterval(configInterval) == false) { + return false; + } + + // The request must be gte the config. The CALENDAR_ORDERING map values are integers representing + // relative orders between the calendar units + int requestOrder = CALENDAR_ORDERING.getOrDefault(requestInterval.toString(), Integer.MAX_VALUE); + int configOrder = CALENDAR_ORDERING.getOrDefault(configInterval.toString(), Integer.MAX_VALUE); + + // All calendar units are multiples naturally, so we just care about gte + return requestOrder >= configOrder; + } + + static boolean validateFixedInterval(DateHistogramInterval requestInterval, + DateHistogramInterval configInterval) { + // Neither can be calendar intervals + if (isCalendarInterval(requestInterval) || isCalendarInterval(configInterval)) { + return false; + } + + // Both are fixed, good to convert to millis now + long configIntervalMillis = TimeValue.parseTimeValue(configInterval.toString(), + "date_histo.config.interval").getMillis(); + long requestIntervalMillis = TimeValue.parseTimeValue(requestInterval.toString(), + "date_histo.request.interval").getMillis(); + + // Must be a multiple and gte the config + return requestIntervalMillis >= configIntervalMillis && requestIntervalMillis % configIntervalMillis == 0; + } + + static boolean validateFixedInterval(long requestInterval, DateHistogramInterval configInterval) { + // config must not be a calendar interval + if (isCalendarInterval(configInterval)) { + return false; + } + long configIntervalMillis = TimeValue.parseTimeValue(configInterval.toString(), + "date_histo.config.interval").getMillis(); + + // Must be a multiple and gte the config + return requestInterval >= configIntervalMillis && requestInterval % configIntervalMillis == 0; + } + /** * Find the set of histo's with the largest interval */ @@ -144,8 +223,8 @@ private static void checkHisto(HistogramAggregationBuilder source, List agg : fieldCaps.getAggs()) { if (agg.get(RollupField.AGG).equals(HistogramAggregationBuilder.NAME)) { Long interval = (long)agg.get(RollupField.INTERVAL); - // TODO should be divisor of interval - if (interval <= source.interval()) { + // query interval must be gte the configured interval, and a whole multiple + if (interval <= source.interval() && source.interval() % interval == 0) { localCaps.add(cap); } break; @@ -155,8 +234,8 @@ private static void checkHisto(HistogramAggregationBuilder source, List { - if (r == null || r.getAggregations() == null || r.getAggregations().asList().size() == 0) { - throw new RuntimeException("Expected to find 
aggregations in rollup response, but none found."); + int missingRollupAggs = rolledResponses.stream().mapToInt(searchResponse -> { + if (searchResponse == null + || searchResponse.getAggregations() == null + || searchResponse.getAggregations().asList().size() == 0) { + return 1; } - }); + return 0; + }).sum(); + + // We had no rollup aggs, so there is nothing to process + if (missingRollupAggs == rolledResponses.size()) { + // Return an empty response, but make sure we include all the shard, failure, etc stats + return mergeFinalResponse(liveResponse, rolledResponses, InternalAggregations.EMPTY); + } else if (missingRollupAggs > 0 && missingRollupAggs != rolledResponses.size()) { + // We were missing some but not all the aggs, unclear how to handle this. Bail. + throw new RuntimeException("Expected to find aggregations in rollup response, but none found."); + } // The combination process returns a tree that is identical to the non-rolled // which means we can use aggregation's reduce method to combine, just as if @@ -275,27 +287,39 @@ private static SearchResponse doCombineResponse(SearchResponse liveResponse, Lis new InternalAggregation.ReduceContext(reduceContext.bigArrays(), reduceContext.scriptService(), true)); } - // TODO allow profiling in the future - InternalSearchResponse combinedInternal = new InternalSearchResponse(SearchHits.empty(), currentTree, null, null, - rolledResponses.stream().anyMatch(SearchResponse::isTimedOut), - rolledResponses.stream().anyMatch(SearchResponse::isTimedOut), - rolledResponses.stream().mapToInt(SearchResponse::getNumReducePhases).sum()); + return mergeFinalResponse(liveResponse, rolledResponses, currentTree); + } + + private static SearchResponse mergeFinalResponse(SearchResponse liveResponse, List rolledResponses, + InternalAggregations aggs) { int totalShards = rolledResponses.stream().mapToInt(SearchResponse::getTotalShards).sum(); int sucessfulShards = rolledResponses.stream().mapToInt(SearchResponse::getSuccessfulShards).sum(); int skippedShards = rolledResponses.stream().mapToInt(SearchResponse::getSkippedShards).sum(); long took = rolledResponses.stream().mapToLong(r -> r.getTook().getMillis()).sum() ; + boolean isTimedOut = rolledResponses.stream().anyMatch(SearchResponse::isTimedOut); + boolean isTerminatedEarly = rolledResponses.stream() + .filter(r -> r.isTerminatedEarly() != null) + .anyMatch(SearchResponse::isTerminatedEarly); + int numReducePhases = rolledResponses.stream().mapToInt(SearchResponse::getNumReducePhases).sum(); + if (liveResponse != null) { totalShards += liveResponse.getTotalShards(); sucessfulShards += liveResponse.getSuccessfulShards(); skippedShards += liveResponse.getSkippedShards(); took = Math.max(took, liveResponse.getTook().getMillis()); + isTimedOut = isTimedOut && liveResponse.isTimedOut(); + isTerminatedEarly = isTerminatedEarly && liveResponse.isTerminatedEarly(); + numReducePhases += liveResponse.getNumReducePhases(); } + InternalSearchResponse combinedInternal = new InternalSearchResponse(SearchHits.empty(), aggs, null, null, + isTimedOut, isTerminatedEarly, numReducePhases); + // Shard failures are ignored atm, so returning an empty array is fine return new SearchResponse(combinedInternal, null, totalShards, sucessfulShards, skippedShards, - took, ShardSearchFailure.EMPTY_ARRAY, rolledResponses.get(0).getClusters()); + took, ShardSearchFailure.EMPTY_ARRAY, rolledResponses.get(0).getClusters()); } /** diff --git 
a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java index 9f20fba8e92da..f0600d80f82a6 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java @@ -158,7 +158,8 @@ static void updateMapping(RollupJob job, ActionListener li MappingMetaData mappings = getMappingResponse.getMappings().get(indexName).get(RollupField.TYPE_NAME); Object m = mappings.getSourceAsMap().get("_meta"); if (m == null) { - String msg = "Expected to find _meta key in mapping of rollup index [" + indexName + "] but not found."; + String msg = "Rollup data cannot be added to existing indices that contain non-rollup data (expected " + + "to find _meta key in mapping of rollup index [" + indexName + "] but not found)."; logger.error(msg); listener.onFailure(new RuntimeException(msg)); return; @@ -166,8 +167,9 @@ static void updateMapping(RollupJob job, ActionListener li Map metadata = (Map) m; if (metadata.get(RollupField.ROLLUP_META) == null) { - String msg = "Expected to find rollup meta key [" + RollupField.ROLLUP_META + "] in mapping of rollup index [" + indexName - + "] but not found."; + String msg = "Rollup data cannot be added to existing indices that contain non-rollup data (expected " + + "to find rollup meta key [" + RollupField.ROLLUP_META + "] in mapping of rollup index [" + + indexName + "] but not found)."; logger.error(msg); listener.onFailure(new RuntimeException(msg)); return; diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java index c63ab96fa2595..ea0319c34328b 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java @@ -155,6 +155,18 @@ static MultiSearchRequest createMSearchRequest(SearchRequest request, NamedWrite rolledSearchSource.size(0); AggregatorFactories.Builder sourceAgg = request.source().aggregations(); + // If there are no aggs in the request, our translation won't create any msearch. + // So just add a dummy request to the msearch and return. This is a bit silly + // but maintains how the regular search API behaves + if (sourceAgg == null || sourceAgg.count() == 0) { + + // Note: we can't apply any query rewriting or filtering on the query because there + // are no validated caps, so we have no idea what job is intended here. The only thing + // this affects is doc count, since hits and aggs will both be empty, it doesn't really matter. 
+ msearch.add(new SearchRequest(context.getRollupIndices(), request.source()).types(request.types())); + return msearch; + } + // Find our list of "best" job caps Set validatedCaps = new HashSet<>(); sourceAgg.getAggregatorFactories() @@ -248,11 +260,6 @@ static void validateSearchRequest(SearchRequest request) { if (request.source().explain() != null && request.source().explain()) { throw new IllegalArgumentException("Rollup search does not support explaining."); } - - // Rollup is only useful if aggregations are set, throw an exception otherwise - if (request.source().aggregations() == null) { - throw new IllegalArgumentException("Rollup requires at least one aggregation to be set."); - } } static QueryBuilder rewriteQuery(QueryBuilder builder, Set jobCaps) { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java index 9119a5445d42e..94d64b17de8f3 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.rollup.Rollup; import java.util.ArrayList; @@ -46,7 +46,7 @@ class IndexerUtils { * @param isUpgradedDocID `true` if this job is using the new ID scheme * @return A list of rolled documents derived from the response */ - static List processBuckets(CompositeAggregation agg, String rollupIndex, RollupJobStats stats, + static List processBuckets(CompositeAggregation agg, String rollupIndex, RollupIndexerJobStats stats, GroupConfig groupConfig, String jobId, boolean isUpgradedDocID) { logger.debug("Buckets: [" + agg.getBuckets().size() + "][" + jobId + "]"); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java index 87294706b3b7d..b1b052a3659d6 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java @@ -5,31 +5,45 @@ */ package org.elasticsearch.xpack.rollup.job; -import org.apache.log4j.Logger; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.composite.DateHistogramValuesSourceBuilder; 
+import org.elasticsearch.search.aggregations.bucket.composite.HistogramValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.indexing.IndexerState; +import org.elasticsearch.xpack.core.indexing.IterationResult; +import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; +import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.MetricConfig; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; +import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; +import org.joda.time.DateTimeZone; import java.util.ArrayList; -import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -37,26 +51,16 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.xpack.core.rollup.RollupField.formatFieldName; + /** - * An abstract class that builds a rollup index incrementally. A background job can be launched using {@link #maybeTriggerAsyncJob(long)}, - * it will create the rollup index from the source index up to the last complete bucket that is allowed to be built (based on the current - * time and the delay set on the rollup job). Only one background job can run simultaneously and {@link #onFinish()} is called when the job - * finishes. {@link #onFailure(Exception)} is called if the job fails with an exception and {@link #onAbort()} is called if the indexer is - * aborted while a job is running. The indexer must be started ({@link #start()} to allow a background job to run when - * {@link #maybeTriggerAsyncJob(long)} is called. {@link #stop()} can be used to stop the background job without aborting the indexer. + * An abstract implementation of {@link AsyncTwoPhaseIndexer} that builds a rollup index incrementally. 
*/ -public abstract class RollupIndexer { - private static final Logger logger = Logger.getLogger(RollupIndexer.class.getName()); - +public abstract class RollupIndexer extends AsyncTwoPhaseIndexer, RollupIndexerJobStats> { static final String AGGREGATION_NAME = RollupField.NAME; private final RollupJob job; - private final RollupJobStats stats; - private final AtomicReference state; - private final AtomicReference> position; - private final Executor executor; protected final AtomicBoolean upgradedDocumentID; - private final CompositeAggregationBuilder compositeBuilder; private long maxBoundary; @@ -66,84 +70,16 @@ public abstract class RollupIndexer { * @param job The rollup job * @param initialState Initial state for the indexer * @param initialPosition The last indexed bucket of the task + * @param upgradedDocumentID whether job has updated IDs (for BWC) */ - RollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, - Map initialPosition, AtomicBoolean upgradedDocumentID) { - this.executor = executor; + RollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, Map initialPosition, + AtomicBoolean upgradedDocumentID) { + super(executor, initialState, initialPosition, new RollupIndexerJobStats()); this.job = job; - this.stats = new RollupJobStats(); - this.state = initialState; - this.position = new AtomicReference<>(initialPosition); this.compositeBuilder = createCompositeBuilder(job.getConfig()); this.upgradedDocumentID = upgradedDocumentID; } - /** - * Executes the {@link SearchRequest} and calls nextPhase with the response - * or the exception if an error occurs. - * - * @param request The search request to execute - * @param nextPhase Listener for the next phase - */ - protected abstract void doNextSearch(SearchRequest request, ActionListener nextPhase); - - /** - * Executes the {@link BulkRequest} and calls nextPhase with the response - * or the exception if an error occurs. - * - * @param request The bulk request to execute - * @param nextPhase Listener for the next phase - */ - protected abstract void doNextBulk(BulkRequest request, ActionListener nextPhase); - - /** - * Called periodically during the execution of a background job. Implementation should - * persists the state somewhere and continue the execution asynchronously using next. - * - * @param state The current state of the indexer - * @param position The current position of the indexer - * @param next Runnable for the next phase - */ - protected abstract void doSaveState(IndexerState state, Map position, Runnable next); - - /** - * Called when a failure occurs in an async job causing the execution to stop. - * @param exc The exception - */ - protected abstract void onFailure(Exception exc); - - /** - * Called when a background job finishes. - */ - protected abstract void onFinish(); - - /** - * Called when a background job detects that the indexer is aborted causing the async execution - * to stop. - */ - protected abstract void onAbort(); - - /** - * Get the current state of the indexer. - */ - public IndexerState getState() { - return state.get(); - } - - /** - * Get the current position of the indexer. - */ - public Map getPosition() { - return position.get(); - } - - /** - * Get the stats of this indexer. 
- */ - public RollupJobStats getStats() { - return stats; - } - /** * Returns if this job has upgraded it's ID scheme yet or not */ @@ -151,229 +87,28 @@ public boolean isUpgradedDocumentID() { return upgradedDocumentID.get(); } - /** - * Sets the internal state to {@link IndexerState#STARTED} if the previous state was {@link IndexerState#STOPPED}. Setting the state to - * STARTED allows a job to run in the background when {@link #maybeTriggerAsyncJob(long)} is called. - * @return The new state for the indexer (STARTED, INDEXING or ABORTING if the job was already aborted). - */ - public synchronized IndexerState start() { - state.compareAndSet(IndexerState.STOPPED, IndexerState.STARTED); - return state.get(); - } - - /** - * Sets the internal state to {@link IndexerState#STOPPING} if an async job is running in the background and in such case - * {@link #onFinish()} will be called as soon as the background job detects that the indexer is stopped. If there is no job running when - * this function is called, the state is directly set to {@link IndexerState#STOPPED} and {@link #onFinish()} will never be called. - * @return The new state for the indexer (STOPPED, STOPPING or ABORTING if the job was already aborted). - */ - public synchronized IndexerState stop() { - IndexerState currentState = state.updateAndGet(previousState -> { - if (previousState == IndexerState.INDEXING) { - return IndexerState.STOPPING; - } else if (previousState == IndexerState.STARTED) { - return IndexerState.STOPPED; - } else { - return previousState; - } - }); - return currentState; - } - - /** - * Sets the internal state to {@link IndexerState#ABORTING}. It returns false if an async job is running in the background and in such - * case {@link #onAbort} will be called as soon as the background job detects that the indexer is aborted. If there is no job running - * when this function is called, it returns true and {@link #onAbort()} will never be called. - * @return true if the indexer is aborted, false if a background job is running and abort is delayed. - */ - public synchronized boolean abort() { - IndexerState prevState = state.getAndUpdate((prev) -> IndexerState.ABORTING); - return prevState == IndexerState.STOPPED || prevState == IndexerState.STARTED; - } - - /** - * Triggers a background job that builds the rollup index asynchronously iff there is no other job that runs - * and the indexer is started ({@link IndexerState#STARTED}. - * - * @param now The current time in milliseconds (used to limit the job to complete buckets) - * @return true if a job has been triggered, false otherwise - */ - public synchronized boolean maybeTriggerAsyncJob(long now) { - final IndexerState currentState = state.get(); - switch (currentState) { - case INDEXING: - case STOPPING: - case ABORTING: - logger.warn("Schedule was triggered for rollup job [" + job.getConfig().getId() + "], but prior indexer is still running."); - return false; - - case STOPPED: - logger.debug("Schedule was triggered for rollup job [" + job.getConfig().getId() - + "] but job is stopped. Ignoring trigger."); - return false; - - case STARTED: - logger.debug("Schedule was triggered for rollup job [" + job.getConfig().getId() + "], state: [" + currentState + "]"); - // Only valid time to start indexing is when we are STARTED but not currently INDEXING. - stats.incrementNumInvocations(1); - - // rounds the current time to its current bucket based on the date histogram interval. - // this is needed to exclude buckets that can still receive new documents. 
- DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram(); - long rounded = dateHisto.createRounding().round(now); - if (dateHisto.getDelay() != null) { - // if the job has a delay we filter all documents that appear before it. - maxBoundary = rounded - TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis(); - } else { - maxBoundary = rounded; - } - - if (state.compareAndSet(IndexerState.STARTED, IndexerState.INDEXING)) { - // fire off the search. Note this is async, the method will return from here - executor.execute(() -> doNextSearch(buildSearchRequest(), - ActionListener.wrap(this::onSearchResponse, exc -> finishWithFailure(exc)))); - logger.debug("Beginning to rollup [" + job.getConfig().getId() + "], state: [" + currentState + "]"); - return true; - } else { - logger.debug("Could not move from STARTED to INDEXING state because current state is [" + state.get() + "]"); - return false; - } - - default: - logger.warn("Encountered unexpected state [" + currentState + "] while indexing"); - throw new IllegalStateException("Rollup job encountered an illegal state [" + currentState + "]"); - } - } - - /** - * Checks the {@link IndexerState} and returns false if the execution - * should be stopped. - */ - private boolean checkState(IndexerState currentState) { - switch (currentState) { - case INDEXING: - // normal state; - return true; - - case STOPPING: - logger.info("Rollup job encountered [" + IndexerState.STOPPING + "] state, halting indexer."); - doSaveState(finishAndSetState(), getPosition(), () -> {}); - return false; - - case STOPPED: - return false; - - case ABORTING: - logger.info("Requested shutdown of indexer for job [" + job.getConfig().getId() + "]"); - onAbort(); - return false; - - default: - // Anything other than indexing, aborting or stopping is unanticipated - logger.warn("Encountered unexpected state [" + currentState + "] while indexing"); - throw new IllegalStateException("Rollup job encountered an illegal state [" + currentState + "]"); - } + @Override + protected String getJobId() { + return job.getConfig().getId(); } - private void onBulkResponse(BulkResponse response, Map after) { - // TODO we should check items in the response and move after accordingly to resume the failing buckets ? - stats.incrementNumRollups(response.getItems().length); - if (response.hasFailures()) { - logger.warn("Error while attempting to bulk index rollup documents: " + response.buildFailureMessage()); - } - try { - if (checkState(getState()) == false) { - return ; - } - position.set(after); - ActionListener listener = ActionListener.wrap(this::onSearchResponse, this::finishWithFailure); - // TODO probably something more intelligent than every-50 is needed - if (stats.getNumPages() > 0 && stats.getNumPages() % 50 == 0) { - doSaveState(IndexerState.INDEXING, after, () -> doNextSearch(buildSearchRequest(), listener)); - } else { - doNextSearch(buildSearchRequest(), listener); - } - } catch (Exception e) { - finishWithFailure(e); + @Override + protected void onStartJob(long now) { + // this is needed to exclude buckets that can still receive new documents. + DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram(); + long rounded = dateHisto.createRounding().round(now); + if (dateHisto.getDelay() != null) { + // if the job has a delay we filter all documents that appear before it. 
+ maxBoundary = rounded - TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis(); + } else { + maxBoundary = rounded; } } - private void onSearchResponse(SearchResponse searchResponse) { - try { - if (checkState(getState()) == false) { - return ; - } - if (searchResponse.getShardFailures().length != 0) { - throw new RuntimeException("Shard failures encountered while running indexer for rollup job [" - + job.getConfig().getId() + "]: " + Arrays.toString(searchResponse.getShardFailures())); - } - final CompositeAggregation response = searchResponse.getAggregations().get(AGGREGATION_NAME); - if (response == null) { - throw new IllegalStateException("Missing composite response for query: " + compositeBuilder.toString()); - } - stats.incrementNumPages(1); - if (response.getBuckets().isEmpty()) { - // this is the end... - logger.debug("Finished indexing for job [" + job.getConfig().getId() + "], saving state and shutting down."); - - // Change state first, then try to persist. This prevents in-progress STOPPING/ABORTING from - // being persisted as STARTED but then stop the job - doSaveState(finishAndSetState(), position.get(), this::onFinish); - return; - } - - final BulkRequest bulkRequest = new BulkRequest(); + @Override + protected SearchRequest buildSearchRequest() { // Indexer is single-threaded, and only place that the ID scheme can get upgraded is doSaveState(), so // we can pass down the boolean value rather than the atomic here - final List docs = IndexerUtils.processBuckets(response, job.getConfig().getRollupIndex(), - stats, job.getConfig().getGroupConfig(), job.getConfig().getId(), upgradedDocumentID.get()); - docs.forEach(bulkRequest::add); - assert bulkRequest.requests().size() > 0; - doNextBulk(bulkRequest, - ActionListener.wrap( - bulkResponse -> onBulkResponse(bulkResponse, response.afterKey()), - exc -> finishWithFailure(exc) - ) - ); - } catch(Exception e) { - finishWithFailure(e); - } - } - - private void finishWithFailure(Exception exc) { - doSaveState(finishAndSetState(), position.get(), () -> onFailure(exc)); - } - - private IndexerState finishAndSetState() { - return state.updateAndGet( - prev -> { - switch (prev) { - case INDEXING: - // ready for another job - return IndexerState.STARTED; - - case STOPPING: - // must be started again - return IndexerState.STOPPED; - - case ABORTING: - // abort and exit - onAbort(); - return IndexerState.ABORTING; // This shouldn't matter, since onAbort() will kill the task first - - case STOPPED: - // No-op. 
Shouldn't really be possible to get here (should have to go through STOPPING - // first which will be handled) but is harmless to no-op and we don't want to throw exception here - return IndexerState.STOPPED; - - default: - // any other state is unanticipated at this point - throw new IllegalStateException("Rollup job encountered an illegal state [" + prev + "]"); - } - }); - } - - private SearchRequest buildSearchRequest() { final Map position = getPosition(); SearchSourceBuilder searchSource = new SearchSourceBuilder() .size(0) @@ -384,6 +119,16 @@ private SearchRequest buildSearchRequest() { return new SearchRequest(job.getConfig().getIndexPattern()).source(searchSource); } + @Override + protected IterationResult> doProcess(SearchResponse searchResponse) { + final CompositeAggregation response = searchResponse.getAggregations().get(AGGREGATION_NAME); + + return new IterationResult<>( + IndexerUtils.processBuckets(response, job.getConfig().getRollupIndex(), getStats(), + job.getConfig().getGroupConfig(), job.getConfig().getId(), upgradedDocumentID.get()), + response.afterKey(), response.getBuckets().isEmpty()); + } + /** * Creates a skeleton {@link CompositeAggregationBuilder} from the provided job config. * @param config The config for the job. @@ -391,24 +136,14 @@ private SearchRequest buildSearchRequest() { */ private CompositeAggregationBuilder createCompositeBuilder(RollupJobConfig config) { final GroupConfig groupConfig = config.getGroupConfig(); - List> builders = new ArrayList<>(); - Map metadata = new HashMap<>(); - - // Add all the agg builders to our request in order: date_histo -> histo -> terms - if (groupConfig != null) { - builders.addAll(groupConfig.getDateHistogram().toBuilders()); - metadata.putAll(groupConfig.getDateHistogram().getMetadata()); - if (groupConfig.getHistogram() != null) { - builders.addAll(groupConfig.getHistogram().toBuilders()); - metadata.putAll(groupConfig.getHistogram().getMetadata()); - } - if (groupConfig.getTerms() != null) { - builders.addAll(groupConfig.getTerms().toBuilders()); - } - } + List> builders = createValueSourceBuilders(groupConfig); CompositeAggregationBuilder composite = new CompositeAggregationBuilder(AGGREGATION_NAME, builders); - config.getMetricsConfig().forEach(m -> m.toBuilders().forEach(composite::subAggregation)); + + List aggregations = createAggregationBuilders(config.getMetricsConfig()); + aggregations.forEach(composite::subAggregation); + + final Map metadata = createMetadata(groupConfig); if (metadata.isEmpty() == false) { composite.setMetaData(metadata); } @@ -441,5 +176,127 @@ private QueryBuilder createBoundaryQuery(Map position) { .format("epoch_millis"); return query; } + + static Map createMetadata(final GroupConfig groupConfig) { + final Map metadata = new HashMap<>(); + if (groupConfig != null) { + // Add all the metadata in order: date_histo -> histo + final DateHistogramGroupConfig dateHistogram = groupConfig.getDateHistogram(); + metadata.put(RollupField.formatMetaField(RollupField.INTERVAL), dateHistogram.getInterval().toString()); + + final HistogramGroupConfig histogram = groupConfig.getHistogram(); + if (histogram != null) { + metadata.put(RollupField.formatMetaField(RollupField.INTERVAL), histogram.getInterval()); + } + } + return metadata; + } + + public static List> createValueSourceBuilders(final GroupConfig groupConfig) { + final List> builders = new ArrayList<>(); + // Add all the agg builders to our request in order: date_histo -> histo -> terms + if (groupConfig != null) { + final 
DateHistogramGroupConfig dateHistogram = groupConfig.getDateHistogram(); + builders.addAll(createValueSourceBuilders(dateHistogram)); + + final HistogramGroupConfig histogram = groupConfig.getHistogram(); + builders.addAll(createValueSourceBuilders(histogram)); + + final TermsGroupConfig terms = groupConfig.getTerms(); + builders.addAll(createValueSourceBuilders(terms)); + } + return Collections.unmodifiableList(builders); + } + + public static List> createValueSourceBuilders(final DateHistogramGroupConfig dateHistogram) { + final String dateHistogramField = dateHistogram.getField(); + final String dateHistogramName = RollupField.formatIndexerAggName(dateHistogramField, DateHistogramAggregationBuilder.NAME); + final DateHistogramValuesSourceBuilder dateHistogramBuilder = new DateHistogramValuesSourceBuilder(dateHistogramName); + dateHistogramBuilder.dateHistogramInterval(dateHistogram.getInterval()); + dateHistogramBuilder.field(dateHistogramField); + dateHistogramBuilder.timeZone(toDateTimeZone(dateHistogram.getTimeZone())); + return Collections.singletonList(dateHistogramBuilder); + } + + public static List> createValueSourceBuilders(final HistogramGroupConfig histogram) { + final List> builders = new ArrayList<>(); + if (histogram != null) { + for (String field : histogram.getFields()) { + final String histogramName = RollupField.formatIndexerAggName(field, HistogramAggregationBuilder.NAME); + final HistogramValuesSourceBuilder histogramBuilder = new HistogramValuesSourceBuilder(histogramName); + histogramBuilder.interval(histogram.getInterval()); + histogramBuilder.field(field); + histogramBuilder.missingBucket(true); + builders.add(histogramBuilder); + } + } + return Collections.unmodifiableList(builders); + } + + public static List> createValueSourceBuilders(final TermsGroupConfig terms) { + final List> builders = new ArrayList<>(); + if (terms != null) { + for (String field : terms.getFields()) { + final String termsName = RollupField.formatIndexerAggName(field, TermsAggregationBuilder.NAME); + final TermsValuesSourceBuilder termsBuilder = new TermsValuesSourceBuilder(termsName); + termsBuilder.field(field); + termsBuilder.missingBucket(true); + builders.add(termsBuilder); + } + } + return Collections.unmodifiableList(builders); + } + + /** + * This returns a set of aggregation builders which represent the configured + * set of metrics. Used to iterate over historical data. 
+ */ + static List createAggregationBuilders(final List metricsConfigs) { + final List builders = new ArrayList<>(); + if (metricsConfigs != null) { + for (MetricConfig metricConfig : metricsConfigs) { + final List metrics = metricConfig.getMetrics(); + if (metrics.isEmpty() == false) { + final String field = metricConfig.getField(); + for (String metric : metrics) { + ValuesSourceAggregationBuilder.LeafOnly newBuilder; + if (metric.equals(MetricConfig.MIN.getPreferredName())) { + newBuilder = new MinAggregationBuilder(formatFieldName(field, MinAggregationBuilder.NAME, RollupField.VALUE)); + } else if (metric.equals(MetricConfig.MAX.getPreferredName())) { + newBuilder = new MaxAggregationBuilder(formatFieldName(field, MaxAggregationBuilder.NAME, RollupField.VALUE)); + } else if (metric.equals(MetricConfig.AVG.getPreferredName())) { + // Avgs are sum + count + newBuilder = new SumAggregationBuilder(formatFieldName(field, AvgAggregationBuilder.NAME, RollupField.VALUE)); + ValuesSourceAggregationBuilder.LeafOnly countBuilder + = new ValueCountAggregationBuilder( + formatFieldName(field, AvgAggregationBuilder.NAME, RollupField.COUNT_FIELD), ValueType.NUMERIC); + countBuilder.field(field); + builders.add(countBuilder); + } else if (metric.equals(MetricConfig.SUM.getPreferredName())) { + newBuilder = new SumAggregationBuilder(formatFieldName(field, SumAggregationBuilder.NAME, RollupField.VALUE)); + } else if (metric.equals(MetricConfig.VALUE_COUNT.getPreferredName())) { + // TODO allow non-numeric value_counts. + // Hardcoding this is fine for now since the job validation guarantees that all metric fields are numerics + newBuilder = new ValueCountAggregationBuilder( + formatFieldName(field, ValueCountAggregationBuilder.NAME, RollupField.VALUE), ValueType.NUMERIC); + } else { + throw new IllegalArgumentException("Unsupported metric type [" + metric + "]"); + } + newBuilder.field(field); + builders.add(newBuilder); + } + } + } + } + return Collections.unmodifiableList(builders); + } + + private static DateTimeZone toDateTimeZone(final String timezone) { + try { + return DateTimeZone.forOffsetHours(Integer.parseInt(timezone)); + } catch (NumberFormatException e) { + return DateTimeZone.forID(timezone); + } + } } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index 65362f9ad9dd3..4a4b53575b238 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -25,13 +25,13 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.StartRollupJobAction; import org.elasticsearch.xpack.core.rollup.action.StopRollupJobAction; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import 
org.elasticsearch.xpack.rollup.Rollup; @@ -218,7 +218,7 @@ public Status getStatus() { * Gets the stats for this task. * @return The stats of this task */ - public RollupJobStats getStats() { + public RollupIndexerJobStats getStats() { return indexer.getStats(); } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestGetRollupJobsAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestGetRollupJobsAction.java index 00aeb0d06ab65..fcc1f2c4f5734 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestGetRollupJobsAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestGetRollupJobsAction.java @@ -12,21 +12,19 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.rollup.Rollup; import org.elasticsearch.xpack.core.rollup.action.GetRollupJobsAction; - -import java.io.IOException; +import org.elasticsearch.xpack.rollup.Rollup; public class RestGetRollupJobsAction extends BaseRestHandler { public static final ParseField ID = new ParseField("id"); public RestGetRollupJobsAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(RestRequest.Method.GET, Rollup.BASE_PATH + "job/{id}/", this); + controller.registerHandler(RestRequest.Method.GET, Rollup.BASE_PATH + "job/{id}/", this); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { String id = restRequest.param(ID.getPreferredName()); GetRollupJobsAction.Request request = new GetRollupJobsAction.Request(id); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java index 3235d0c39e256..c23151c4c6afe 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java @@ -61,6 +61,32 @@ public void testBiggerButCompatibleInterval() { assertThat(bestCaps.size(), equalTo(1)); } + public void testBiggerButCompatibleFixedInterval() { + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("100s"))); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") + .dateHistogramInterval(new DateHistogramInterval("1000s")); + + Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); + assertThat(bestCaps.size(), equalTo(1)); + } + + public void testBiggerButCompatibleFixedMillisInterval() { + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("100ms"))); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + DateHistogramAggregationBuilder builder = new 
DateHistogramAggregationBuilder("foo").field("foo") + .interval(1000); + + Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); + assertThat(bestCaps.size(), equalTo(1)); + } + public void testIncompatibleInterval() { final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); @@ -75,6 +101,20 @@ public void testIncompatibleInterval() { "[foo] which also satisfies all requirements of query.")); } + public void testIncompatibleFixedCalendarInterval() { + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("5d"))); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") + .dateHistogramInterval(new DateHistogramInterval("day")); + + RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps)); + assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [date_histogram] agg on field " + + "[foo] which also satisfies all requirements of query.")); + } + public void testBadTimeZone() { final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "EST")); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); @@ -385,6 +425,27 @@ public void testNoMatchingHistoInterval() { "[bar] which also satisfies all requirements of query.")); } + public void testHistoIntervalNotMultiple() { + HistogramAggregationBuilder histo = new HistogramAggregationBuilder("test_histo"); + histo.interval(10) // <--- interval is not a multiple of 3 + .field("bar") + .subAggregation(new MaxAggregationBuilder("the_max").field("max_field")) + .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); + + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", + new DateHistogramInterval("1d"), null, "UTC"), + new HistogramGroupConfig(3L, "bar"), + null); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + Exception e = expectThrows(RuntimeException.class, + () -> RollupJobIdentifierUtils.findBestJobs(histo, caps)); + assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [histogram] agg on field " + + "[bar] which also satisfies all requirements of query.")); + } + public void testMissingMetric() { int i = ESTestCase.randomIntBetween(0, 3); @@ -417,6 +478,105 @@ public void testMissingMetric() { } + public void testValidateFixedInterval() { + boolean valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(200, new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(1000, new DateHistogramInterval("200ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(5*60*1000, new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = 
RollupJobIdentifierUtils.validateFixedInterval(10*5*60*1000, new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("500ms")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("5m")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("minute")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("second")); + assertFalse(valid); + + // ----------- + // Same tests, with both being DateHistoIntervals + // ----------- + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("200ms"), + new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("1000ms"), + new DateHistogramInterval("200ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("5m"), + new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("20m"), + new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("500ms")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("5m")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("minute")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("second")); + assertFalse(valid); + } + + public void testValidateCalendarInterval() { + boolean valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("second"), + new DateHistogramInterval("second")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("minute"), + new DateHistogramInterval("second")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("month"), + new DateHistogramInterval("day")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("1d"), + new DateHistogramInterval("1s")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("second"), + new DateHistogramInterval("minute")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("second"), + new DateHistogramInterval("1m")); + assertFalse(valid); + + // Fails because both are actually fixed + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("100ms")); + assertFalse(valid); + } + private Set singletonSet(RollupJobCaps cap) { Set caps = new HashSet<>(); caps.add(cap); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java 
b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java index 35d9f0d133a3d..73a4d0665c4e1 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java @@ -198,10 +198,11 @@ public void testRolledMissingAggs() { BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); ScriptService scriptService = mock(ScriptService.class); - Exception e = expectThrows(RuntimeException.class, - () -> RollupResponseTranslator.combineResponses(msearch, - new InternalAggregation.ReduceContext(bigArrays, scriptService, true))); - assertThat(e.getMessage(), equalTo("Expected to find aggregations in rollup response, but none found.")); + SearchResponse response = RollupResponseTranslator.combineResponses(msearch, + new InternalAggregation.ReduceContext(bigArrays, scriptService, true)); + assertNotNull(response); + Aggregations responseAggs = response.getAggregations(); + assertThat(responseAggs.asList().size(), equalTo(0)); } public void testMissingRolledIndex() { diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java index 5599c50321cf1..3d346456ea98d 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java @@ -180,8 +180,9 @@ public void testNoMetadataInMapping() { ActionListener testListener = ActionListener.wrap(response -> { fail("Listener success should not have been triggered."); }, e -> { - assertThat(e.getMessage(), equalTo("Expected to find _meta key in mapping of rollup index [" - + job.getConfig().getRollupIndex() + "] but not found.")); + assertThat(e.getMessage(), equalTo("Rollup data cannot be added to existing indices that contain " + + "non-rollup data (expected to find _meta key in mapping of rollup index [" + + job.getConfig().getRollupIndex() + "] but not found).")); }); Logger logger = mock(Logger.class); @@ -206,6 +207,44 @@ public void testNoMetadataInMapping() { verify(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), any()); } + @SuppressWarnings("unchecked") + public void testMetadataButNotRollup() { + RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); + + ActionListener testListener = ActionListener.wrap(response -> { + fail("Listener success should not have been triggered."); + }, e -> { + assertThat(e.getMessage(), equalTo("Rollup data cannot be added to existing indices that contain " + + "non-rollup data (expected to find rollup meta key [_rollup] in mapping of rollup index [" + + job.getConfig().getRollupIndex() + "] but not found).")); + }); + + Logger logger = mock(Logger.class); + Client client = mock(Client.class); + + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(ActionListener.class); + doAnswer(invocation -> { + GetMappingsResponse response = mock(GetMappingsResponse.class); + Map m = new HashMap<>(2); + m.put("random", + Collections.singletonMap(job.getConfig().getId(), job.getConfig())); + MappingMetaData meta = new MappingMetaData(RollupField.TYPE_NAME, + Collections.singletonMap("_meta", 
m)); + ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(1); + builder.put(RollupField.TYPE_NAME, meta); + + ImmutableOpenMap.Builder> builder2 = ImmutableOpenMap.builder(1); + builder2.put(job.getConfig().getRollupIndex(), builder.build()); + + when(response.getMappings()).thenReturn(builder2.build()); + requestCaptor.getValue().onResponse(response); + return null; + }).when(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), requestCaptor.capture()); + + TransportPutRollupJobAction.updateMapping(job, testListener, mock(PersistentTasksService.class), client, logger); + verify(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), any()); + } + @SuppressWarnings("unchecked") public void testNoMappingVersion() { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java index 069e23e4093de..3cc6190db30d5 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java @@ -307,21 +307,22 @@ public void testExplain() { assertThat(e.getMessage(), equalTo("Rollup search does not support explaining.")); } - public void testNoAgg() { - String[] normalIndices = new String[]{randomAlphaOfLength(10)}; + public void testNoRollupAgg() { + String[] normalIndices = new String[]{}; String[] rollupIndices = new String[]{randomAlphaOfLength(10)}; TransportRollupSearchAction.RollupSearchContext ctx = new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, Collections.emptySet()); SearchSourceBuilder source = new SearchSourceBuilder(); source.query(new MatchAllQueryBuilder()); source.size(0); - SearchRequest request = new SearchRequest(normalIndices, source); + SearchRequest request = new SearchRequest(rollupIndices, source); NamedWriteableRegistry registry = mock(NamedWriteableRegistry.class); - Exception e = expectThrows(IllegalArgumentException.class, - () -> TransportRollupSearchAction.createMSearchRequest(request, registry, ctx)); - assertThat(e.getMessage(), equalTo("Rollup requires at least one aggregation to be set.")); + MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, registry, ctx); + assertThat(msearch.requests().size(), equalTo(1)); + assertThat(msearch.requests().get(0), equalTo(request)); } + public void testNoLiveNoRollup() { String[] normalIndices = new String[0]; String[] rollupIndices = new String[0]; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/job/RollupIndexTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/job/RollupIndexTests.java new file mode 100644 index 0000000000000..c0ba74e762de6 --- /dev/null +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/job/RollupIndexTests.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.rollup.action.job; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; +import org.elasticsearch.xpack.rollup.job.RollupIndexer; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RollupIndexTests extends ESTestCase { + + public void testValidateMatchingField() { + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + String type = getRandomType(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(true); + responseMap.put("my_field", Collections.singletonMap(type, fieldCaps)); + + TermsGroupConfig config = new TermsGroupConfig("my_field"); + config.validateMappings(responseMap, e); + if (e.validationErrors().size() != 0) { + fail(e.getMessage()); + } + + List> builders = RollupIndexer.createValueSourceBuilders(config); + assertThat(builders.size(), equalTo(1)); + } + + public void testValidateFieldMatchingNotAggregatable() { + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(false); + responseMap.put("my_field", Collections.singletonMap(getRandomType(), fieldCaps)); + + TermsGroupConfig config = new TermsGroupConfig("my_field"); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("The field [my_field] must be aggregatable across all indices, but is not.")); + } + + private String getRandomType() { + int n = randomIntBetween(0,8); + if (n == 0) { + return "keyword"; + } else if (n == 1) { + return "text"; + } else if (n == 2) { + return "long"; + } else if (n == 3) { + return "integer"; + } else if (n == 4) { + return "short"; + } else if (n == 5) { + return "float"; + } else if (n == 6) { + return "double"; + } else if (n == 7) { + return "scaled_float"; + } else if (n == 8) { + return "half_float"; + } + return "long"; + } +} diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java index e8c66f7e8c118..098bc83bc7034 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorTestCase; @@ -40,7 +41,7 @@ import 
org.elasticsearch.xpack.core.rollup.job.GroupConfig; import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.MetricConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import org.joda.time.DateTime; import org.mockito.stubbing.Answer; @@ -57,6 +58,7 @@ import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomDateHistogramGroupConfig; import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomGroupConfig; import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomHistogramGroupConfig; +import static org.elasticsearch.xpack.rollup.job.RollupIndexer.createAggregationBuilders; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -64,7 +66,7 @@ public class IndexerUtilsTests extends AggregatorTestCase { public void testMissingFields() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats = new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0); String timestampField = "the_histo"; String valueField = "the_avg"; @@ -101,9 +103,11 @@ public void testMissingFields() throws IOException { //TODO swap this over to DateHistoConfig.Builder once DateInterval is in DateHistogramGroupConfig dateHistoGroupConfig = new DateHistogramGroupConfig(timestampField, DateHistogramInterval.DAY); CompositeAggregationBuilder compositeBuilder = - new CompositeAggregationBuilder(RollupIndexer.AGGREGATION_NAME, dateHistoGroupConfig.toBuilders()); + new CompositeAggregationBuilder(RollupIndexer.AGGREGATION_NAME, + RollupIndexer.createValueSourceBuilders(dateHistoGroupConfig)); MetricConfig metricConfig = new MetricConfig("does_not_exist", singletonList("max")); - metricConfig.toBuilders().forEach(compositeBuilder::subAggregation); + List metricAgg = createAggregationBuilders(singletonList(metricConfig)); + metricAgg.forEach(compositeBuilder::subAggregation); Aggregator aggregator = createAggregator(compositeBuilder, indexSearcher, timestampFieldType, valueFieldType); aggregator.preCollection(); @@ -126,7 +130,7 @@ public void testMissingFields() throws IOException { public void testCorrectFields() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats= new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0); String timestampField = "the_histo"; String valueField = "the_avg"; @@ -170,7 +174,8 @@ public void testCorrectFields() throws IOException { singletonList(dateHisto)); MetricConfig metricConfig = new MetricConfig(valueField, singletonList("max")); - metricConfig.toBuilders().forEach(compositeBuilder::subAggregation); + List metricAgg = createAggregationBuilders(singletonList(metricConfig)); + metricAgg.forEach(compositeBuilder::subAggregation); Aggregator aggregator = createAggregator(compositeBuilder, indexSearcher, timestampFieldType, valueFieldType); aggregator.preCollection(); @@ -193,7 +198,7 @@ public void testCorrectFields() throws IOException { public void testNumericTerms() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats= new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats= new RollupIndexerJobStats(0, 0, 0, 0); String timestampField = 
"the_histo"; String valueField = "the_avg"; @@ -226,7 +231,8 @@ public void testNumericTerms() throws IOException { singletonList(terms)); MetricConfig metricConfig = new MetricConfig(valueField, singletonList("max")); - metricConfig.toBuilders().forEach(compositeBuilder::subAggregation); + List metricAgg = createAggregationBuilders(singletonList(metricConfig)); + metricAgg.forEach(compositeBuilder::subAggregation); Aggregator aggregator = createAggregator(compositeBuilder, indexSearcher, valueFieldType); aggregator.preCollection(); @@ -249,7 +255,7 @@ public void testNumericTerms() throws IOException { public void testEmptyCounts() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats= new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0); String timestampField = "ts"; String valueField = "the_avg"; @@ -292,7 +298,8 @@ public void testEmptyCounts() throws IOException { singletonList(dateHisto)); MetricConfig metricConfig = new MetricConfig("another_field", Arrays.asList("avg", "sum")); - metricConfig.toBuilders().forEach(compositeBuilder::subAggregation); + List metricAgg = createAggregationBuilders(singletonList(metricConfig)); + metricAgg.forEach(compositeBuilder::subAggregation); Aggregator aggregator = createAggregator(compositeBuilder, indexSearcher, timestampFieldType, valueFieldType); aggregator.preCollection(); @@ -355,7 +362,7 @@ public void testKeyOrderingOldID() { // The content of the config don't actually matter for this test // because the test is just looking at agg keys GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(123L, "abc"), null); - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", false); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), groupConfig, "foo", false); assertThat(docs.size(), equalTo(1)); assertThat(docs.get(0).id(), equalTo("1237859798")); } @@ -399,7 +406,7 @@ public void testKeyOrderingNewID() { }); GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(1L, "abc"), null); - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", true); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), groupConfig, "foo", true); assertThat(docs.size(), equalTo(1)); assertThat(docs.get(0).id(), equalTo("foo$c9LcrFqeFW92uN_Z7sv1hA")); } @@ -449,7 +456,7 @@ public void testKeyOrderingNewIDLong() { }); GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(1, "abc"), null); - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", true); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), groupConfig, "foo", true); assertThat(docs.size(), equalTo(1)); assertThat(docs.get(0).id(), equalTo("foo$VAFKZpyaEqYRPLyic57_qw")); } @@ -476,14 +483,15 @@ public void testNullKeys() { }); GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), randomHistogramGroupConfig(random()), null); - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", randomBoolean()); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), + groupConfig, "foo", randomBoolean()); 
assertThat(docs.size(), equalTo(1)); assertFalse(Strings.isNullOrEmpty(docs.get(0).id())); } public void testMissingBuckets() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats= new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0); String metricField = "metric_field"; String valueField = "value_field"; @@ -523,11 +531,13 @@ public void testMissingBuckets() throws IOException { // Setup the composite agg TermsGroupConfig termsGroupConfig = new TermsGroupConfig(valueField); - CompositeAggregationBuilder compositeBuilder = new CompositeAggregationBuilder(RollupIndexer.AGGREGATION_NAME, - termsGroupConfig.toBuilders()).size(numDocs*2); + CompositeAggregationBuilder compositeBuilder = + new CompositeAggregationBuilder(RollupIndexer.AGGREGATION_NAME, RollupIndexer.createValueSourceBuilders(termsGroupConfig)) + .size(numDocs*2); MetricConfig metricConfig = new MetricConfig(metricField, singletonList("max")); - metricConfig.toBuilders().forEach(compositeBuilder::subAggregation); + List metricAgg = createAggregationBuilders(singletonList(metricConfig)); + metricAgg.forEach(compositeBuilder::subAggregation); Aggregator aggregator = createAggregator(compositeBuilder, indexSearcher, valueFieldType, metricFieldType); aggregator.preCollection(); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index 6d29ee9f9ba6d..55f1cfbdbb29c 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -50,10 +50,10 @@ import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.MetricConfig; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.junit.Before; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java index 955dcbc2beb48..c74ecbadf4fbe 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java @@ -23,8 +23,8 @@ import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.RollupJob; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; import org.mockito.stubbing.Answer; @@ -639,7 +639,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // Note: no docs 
were indexed - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); @@ -743,7 +743,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // Note: no docs were indexed - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); @@ -763,7 +763,7 @@ public void testSearchShardFailure() throws Exception { Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); Consumer failureConsumer = e -> { - assertThat(e.getMessage(), startsWith("Shard failures encountered while running indexer for rollup job")); + assertThat(e.getMessage(), startsWith("Shard failures encountered while running indexer for job")); isFinished.set(true); }; @@ -786,7 +786,7 @@ public void testSearchShardFailure() throws Exception { // Note: no pages processed, no docs were indexed assertThat(indexer.getStats().getNumPages(), equalTo(0L)); - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); @@ -896,7 +896,7 @@ protected void doNextBulk(BulkRequest request, ActionListener next assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // Note: no docs were indexed - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerTests.java new file mode 100644 index 0000000000000..5ab85e2ffa743 --- /dev/null +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.rollup.job; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; +import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.GroupConfig; +import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig; + +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class RollupIndexerTests extends ESTestCase { + + public void testCreateMetadataNoGroupConfig() { + final Map metadata = RollupIndexer.createMetadata(null); + assertNotNull(metadata); + assertTrue(metadata.isEmpty()); + } + + public void testCreateMetadataWithDateHistogramGroupConfigOnly() { + final DateHistogramGroupConfig dateHistogram = ConfigTestHelpers.randomDateHistogramGroupConfig(random()); + final GroupConfig groupConfig = new GroupConfig(dateHistogram); + + final Map metadata = RollupIndexer.createMetadata(groupConfig); + assertEquals(1, metadata.size()); + assertTrue(metadata.containsKey("_rollup.interval")); + Object value = metadata.get("_rollup.interval"); + assertThat(value, equalTo(dateHistogram.getInterval().toString())); + } + + public void testCreateMetadata() { + final DateHistogramGroupConfig dateHistogram = ConfigTestHelpers.randomDateHistogramGroupConfig(random()); + final HistogramGroupConfig histogram = ConfigTestHelpers.randomHistogramGroupConfig(random()); + final GroupConfig groupConfig = new GroupConfig(dateHistogram, histogram, null); + + final Map metadata = RollupIndexer.createMetadata(groupConfig); + assertEquals(1, metadata.size()); + assertTrue(metadata.containsKey("_rollup.interval")); + Object value = metadata.get("_rollup.interval"); + assertThat(value, equalTo(histogram.getInterval())); + } +} + diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index 13290f09e8eb8..a47d057b5d5b9 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.node.Node; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.search.aggregations.Aggregations; @@ -19,11 +20,11 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.StartRollupJobAction; import org.elasticsearch.xpack.core.rollup.action.StopRollupJobAction; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; @@ -47,6 +48,9 @@ public class RollupJobTaskTests extends ESTestCase { + private static final Settings SETTINGS = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), "test") + 
.build(); private static ThreadPool pool = new TestThreadPool("test"); @AfterClass @@ -62,7 +66,7 @@ public void testInitialStatusStopped() { RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -75,7 +79,7 @@ public void testInitialStatusAborting() { RollupJobStatus status = new RollupJobStatus(IndexerState.ABORTING, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -88,7 +92,7 @@ public void testInitialStatusStopping() { RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPING, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -101,7 +105,7 @@ public void testInitialStatusStarted() { RollupJobStatus status = new RollupJobStatus(IndexerState.STARTED, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); @@ -114,7 +118,7 @@ public void testInitialStatusIndexingOldID() { RollupJobStatus status = new RollupJobStatus(IndexerState.INDEXING, Collections.singletonMap("foo", "bar"), false); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); @@ -128,7 +132,7 @@ public void 
testInitialStatusIndexingNewID() { RollupJobStatus status = new RollupJobStatus(IndexerState.INDEXING, Collections.singletonMap("foo", "bar"), true); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); @@ -141,7 +145,7 @@ public void testNoInitialStatus() { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, null, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -154,7 +158,7 @@ public void testStartWhenStarted() throws InterruptedException { RollupJobStatus status = new RollupJobStatus(IndexerState.STARTED, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); @@ -641,7 +645,7 @@ public void testStopWhenStopped() throws InterruptedException { RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, null, randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -748,7 +752,7 @@ public void testStopWhenAborting() throws InterruptedException { RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, null, randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); CountDownLatch latch = new CountDownLatch(2); diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index 6db533bbecf9b..71b22531ccab0 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -1,6 +1,7 @@ evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.esplugin' +apply plugin: 'nebula.maven-scm' esplugin { name 'x-pack-security' 
description 'Elasticsearch Expanded Pack Plugin - Security'
@@ -12,7 +13,8 @@ esplugin {
archivesBaseName = 'x-pack-security'
dependencies {
- compileOnly project(path: xpackModule('core'), configuration: 'shadow')
+ // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
+ compileOnly project(path: xpackModule('core'), configuration: 'default')
compileOnly project(path: ':modules:transport-netty4', configuration: 'runtime')
compileOnly project(path: ':plugins:transport-nio', configuration: 'runtime')
@@ -157,8 +159,7 @@ forbiddenPatterns {
}
forbiddenApisMain {
- signaturesURLs += file('forbidden/ldap-signatures.txt').toURI().toURL()
- signaturesURLs += file('forbidden/xml-signatures.txt').toURI().toURL()
+ signaturesFiles += files('forbidden/ldap-signatures.txt', 'forbidden/xml-signatures.txt')
}
// classes are missing, e.g. com.ibm.icu.lang.UCharacter
@@ -241,7 +242,7 @@ thirdPartyAudit.excludes = [
'javax.persistence.EntityManagerFactory',
'javax.persistence.EntityTransaction',
'javax.persistence.LockModeType',
- 'javax/persistence/Query',
+ 'javax.persistence.Query',
// [missing classes] OpenSAML storage and HttpClient cache have optional memcache support
'net.spy.memcached.CASResponse',
'net.spy.memcached.CASValue',
@@ -265,7 +266,7 @@ thirdPartyAudit.excludes = [
'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1',
]
-if (JavaVersion.current() > JavaVersion.VERSION_1_8) {
+if (project.runtimeJavaVersion > JavaVersion.VERSION_1_8) {
thirdPartyAudit.excludes += [
'javax.xml.bind.JAXBContext',
'javax.xml.bind.JAXBElement',
diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle
index 1a00b2a034000..377d10ec7f203 100644
--- a/x-pack/plugin/security/cli/build.gradle
+++ b/x-pack/plugin/security/cli/build.gradle
@@ -1,12 +1,15 @@
+import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask
+
apply plugin: 'elasticsearch.build'
archivesBaseName = 'elasticsearch-security-cli'
dependencies {
compileOnly "org.elasticsearch:elasticsearch:${version}"
- compileOnly project(path: xpackModule('core'), configuration: 'shadow')
- compile 'org.bouncycastle:bcprov-jdk15on:1.59'
+ // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
+ compileOnly project(path: xpackModule('core'), configuration: 'default')
compile 'org.bouncycastle:bcpkix-jdk15on:1.59'
+ compile 'org.bouncycastle:bcprov-jdk15on:1.59'
testImplementation 'com.google.jimfs:jimfs:1.1'
testCompile "junit:junit:${versions.junit}"
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
@@ -19,6 +22,14 @@ dependencyLicenses {
mapping from: /bc.*/, to: 'bouncycastle'
}
-if (inFipsJvm) {
+if (project.inFipsJvm) {
test.enabled = false
-}
\ No newline at end of file
+ // Forbidden APIs non-portable checks fail because Bouncy Castle classes are being used from the FIPS JDK since those are
+ // not part of the Java specification - all of this is as designed, so we have to relax this check for FIPS.
+ tasks.withType(ForbiddenApisCliTask) {
+ bundledSignatures -= "jdk-non-portable"
+ }
+ // FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit,
+ // rather than provide a long list of exclusions, disable the check on FIPS.
+ thirdPartyAudit.enabled = false +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java index cd5a720eef1dc..28f2756cf262c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java @@ -13,12 +13,6 @@ public class FIPS140JKSKeystoreBootstrapCheck implements BootstrapCheck { - private final boolean fipsModeEnabled; - - FIPS140JKSKeystoreBootstrapCheck(Settings settings) { - this.fipsModeEnabled = XPackSettings.FIPS_MODE_ENABLED.get(settings); - } - /** * Test if the node fails the check. * @@ -28,7 +22,7 @@ public class FIPS140JKSKeystoreBootstrapCheck implements BootstrapCheck { @Override public BootstrapCheckResult check(BootstrapContext context) { - if (fipsModeEnabled) { + if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings)) { final Settings settings = context.settings; Settings keystoreTypeSettings = settings.filter(k -> k.endsWith("keystore.type")) .filter(k -> settings.get(k).equalsIgnoreCase("jks")); @@ -50,6 +44,6 @@ public BootstrapCheckResult check(BootstrapContext context) { @Override public boolean alwaysEnforce() { - return fipsModeEnabled; + return true; } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java index d1bce0dcdd24c..957276bdad2f3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java @@ -10,6 +10,7 @@ import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.xpack.core.XPackSettings; import java.util.EnumSet; @@ -21,15 +22,9 @@ final class FIPS140LicenseBootstrapCheck implements BootstrapCheck { static final EnumSet ALLOWED_LICENSE_OPERATION_MODES = EnumSet.of(License.OperationMode.PLATINUM, License.OperationMode.TRIAL); - private final boolean isInFipsMode; - - FIPS140LicenseBootstrapCheck(boolean isInFipsMode) { - this.isInFipsMode = isInFipsMode; - } - @Override public BootstrapCheckResult check(BootstrapContext context) { - if (isInFipsMode) { + if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings)) { License license = LicenseService.getLicense(context.metaData); if (license != null && ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode()) == false) { return BootstrapCheckResult.failure("FIPS mode is only allowed with a Platinum or Trial license"); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java index 751d63be4fb3f..3faec3d747575 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java @@ -7,19 +7,12 @@ import 
org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.XPackSettings; import java.util.Locale; public class FIPS140PasswordHashingAlgorithmBootstrapCheck implements BootstrapCheck { - private final boolean fipsModeEnabled; - - FIPS140PasswordHashingAlgorithmBootstrapCheck(final Settings settings) { - this.fipsModeEnabled = XPackSettings.FIPS_MODE_ENABLED.get(settings); - } - /** * Test if the node fails the check. * @@ -28,7 +21,7 @@ public class FIPS140PasswordHashingAlgorithmBootstrapCheck implements BootstrapC */ @Override public BootstrapCheckResult check(final BootstrapContext context) { - if (fipsModeEnabled) { + if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings)) { final String selectedAlgorithm = XPackSettings.PASSWORD_HASHING_ALGORITHM.get(context.settings); if (selectedAlgorithm.toLowerCase(Locale.ROOT).startsWith("pbkdf2") == false) { return BootstrapCheckResult.failure("Only PBKDF2 is allowed for password hashing in a FIPS-140 JVM. Please set the " + diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index d31ffae13f245..2a392478cbcd9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -191,7 +191,6 @@ import org.elasticsearch.xpack.security.rest.action.oauth2.RestInvalidateTokenAction; import org.elasticsearch.xpack.security.rest.action.privilege.RestDeletePrivilegesAction; import org.elasticsearch.xpack.security.rest.action.privilege.RestGetPrivilegesAction; -import org.elasticsearch.xpack.security.rest.action.privilege.RestPutPrivilegeAction; import org.elasticsearch.xpack.security.rest.action.privilege.RestPutPrivilegesAction; import org.elasticsearch.xpack.security.rest.action.realm.RestClearRealmCacheAction; import org.elasticsearch.xpack.security.rest.action.role.RestClearRolesCacheAction; @@ -301,8 +300,9 @@ public Security(Settings settings, final Path configPath) { new PkiRealmBootstrapCheck(getSslService()), new TLSLicenseBootstrapCheck(), new FIPS140SecureSettingsBootstrapCheck(settings, env), - new FIPS140JKSKeystoreBootstrapCheck(settings), - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings))); + new FIPS140JKSKeystoreBootstrapCheck(), + new FIPS140PasswordHashingAlgorithmBootstrapCheck(), + new FIPS140LicenseBootstrapCheck())); checks.addAll(InternalRealms.getBootstrapChecks(settings, env)); this.bootstrapChecks = Collections.unmodifiableList(checks); Automatons.updateMaxDeterminizedStates(settings); @@ -762,7 +762,6 @@ public List getRestHandlers(Settings settings, RestController restC new RestSamlInvalidateSessionAction(settings, restController, getLicenseState()), new RestGetPrivilegesAction(settings, restController, getLicenseState()), new RestPutPrivilegesAction(settings, restController, getLicenseState()), - new RestPutPrivilegeAction(settings, restController, getLicenseState()), new RestDeletePrivilegesAction(settings, restController, getLicenseState()) ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java index d2507d51d0e88..9dd18be510f54 100644 
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java @@ -61,7 +61,7 @@ protected void doExecute(Task task, SamlAuthenticateRequest request, ActionListe final TimeValue expiresIn = tokenService.getExpirationDelay(); listener.onResponse( new SamlAuthenticateResponse(authentication.getUser().principal(), tokenString, tuple.v2(), expiresIn)); - }, listener::onFailure), tokenMeta); + }, listener::onFailure), tokenMeta, true); }, e -> { logger.debug(() -> new ParameterizedMessage("SamlToken [{}] could not be authenticated", saml), e); listener.onFailure(e); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java index 3e489c69d1dbc..63931d119e0f2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Realm; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; import org.elasticsearch.xpack.security.authc.saml.SamlNameId; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java index 358f6aee712df..23aaa9e0d992e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.security.authc.TokenService; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import java.io.IOException; import java.util.Collections; /** @@ -48,29 +49,52 @@ public TransportCreateTokenAction(Settings settings, ThreadPool threadPool, Tran @Override protected void doExecute(Task task, CreateTokenRequest request, ActionListener listener) { + CreateTokenRequest.GrantType type = CreateTokenRequest.GrantType.fromString(request.getGrantType()); + assert type != null : "type should have been validated in the action"; + switch (type) { + case PASSWORD: + authenticateAndCreateToken(request, listener); + break; + case CLIENT_CREDENTIALS: + Authentication authentication = Authentication.getAuthentication(threadPool.getThreadContext()); + createToken(request, authentication, authentication, false, listener); + break; + default: + listener.onFailure(new IllegalStateException("grant_type [" + request.getGrantType() + + "] is not supported by the create token action")); + break; + } + } + + private void authenticateAndCreateToken(CreateTokenRequest request, ActionListener listener) { Authentication originatingAuthentication = 
Authentication.getAuthentication(threadPool.getThreadContext()); try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) { final UsernamePasswordToken authToken = new UsernamePasswordToken(request.getUsername(), request.getPassword()); authenticationService.authenticate(CreateTokenAction.NAME, request, authToken, - ActionListener.wrap(authentication -> { - request.getPassword().close(); - tokenService.createUserToken(authentication, originatingAuthentication, ActionListener.wrap(tuple -> { - final String tokenStr = tokenService.getUserTokenString(tuple.v1()); - final String scope = getResponseScopeValue(request.getScope()); + ActionListener.wrap(authentication -> { + request.getPassword().close(); + createToken(request, authentication, originatingAuthentication, true, listener); + }, e -> { + // clear the request password + request.getPassword().close(); + listener.onFailure(e); + })); + } + } + + private void createToken(CreateTokenRequest request, Authentication authentication, Authentication originatingAuth, + boolean includeRefreshToken, ActionListener listener) { + try { + tokenService.createUserToken(authentication, originatingAuth, ActionListener.wrap(tuple -> { + final String tokenStr = tokenService.getUserTokenString(tuple.v1()); + final String scope = getResponseScopeValue(request.getScope()); - final CreateTokenResponse response = - new CreateTokenResponse(tokenStr, tokenService.getExpirationDelay(), scope, tuple.v2()); - listener.onResponse(response); - }, e -> { - // clear the request password - request.getPassword().close(); - listener.onFailure(e); - }), Collections.emptyMap()); - }, e -> { - // clear the request password - request.getPassword().close(); - listener.onFailure(e); - })); + final CreateTokenResponse response = + new CreateTokenResponse(tokenStr, tokenService.getExpirationDelay(), scope, tuple.v2()); + listener.onResponse(response); + }, listener::onFailure), Collections.emptyMap(), includeRefreshToken); + } catch (IOException e) { + listener.onFailure(e); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java index af56ab8d4eb7d..57510ce116f7d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import java.util.function.Supplier; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java index f89745d23e317..7e17cda75f0ab 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java @@ -18,7 
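The TransportCreateTokenAction change above dispatches on the request's grant type: the password grant authenticates the supplied credentials and issues an access token plus refresh token, while the client_credentials grant reuses the already-authenticated client and deliberately omits the refresh token. A rough sketch of that dispatch, with hypothetical names and plain JDK types rather than the action and listener classes above:

import java.util.UUID;

// Simplified, hypothetical sketch of the grant-type dispatch shown above.
enum GrantType { PASSWORD, CLIENT_CREDENTIALS }

record TokenPair(String accessToken, String refreshToken) {}

final class CreateTokenSketch {

    static TokenPair createToken(GrantType type, String principal) {
        switch (type) {
            case PASSWORD:
                // password grant: authenticate first, then issue access + refresh token
                return issueToken(principal, true);
            case CLIENT_CREDENTIALS:
                // client credentials grant: caller is already authenticated, no refresh token
                return issueToken(principal, false);
            default:
                throw new IllegalStateException("grant_type [" + type + "] is not supported");
        }
    }

    private static TokenPair issueToken(String principal, boolean includeRefreshToken) {
        String access = UUID.randomUUID().toString();
        String refresh = includeRefreshToken ? UUID.randomUUID().toString() : null;
        return new TokenPair(access, refresh);
    }

    public static void main(String[] args) {
        System.out.println(createToken(GrantType.CLIENT_CREDENTIALS, "service-account"));
    }
}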
+18,7 @@ import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java index eefaaa72b1e04..b49984b28da08 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java @@ -30,7 +30,7 @@ import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.Privilege; import org.elasticsearch.xpack.core.security.support.Automatons; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authz.AuthorizationService; import org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java index 8dcaf1a61ff47..3f19d28192583 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java @@ -9,7 +9,7 @@ import org.elasticsearch.transport.TransportMessage; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; import java.net.InetAddress; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java index a9245c7653e04..3cd12b1a7ceb9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java @@ -12,7 +12,7 @@ import org.elasticsearch.transport.TransportMessage; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; import java.net.InetAddress; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java index 
6d12455fdea6e..d8b4b4e4bc198 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java @@ -55,7 +55,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.core.template.TemplateUtils; import org.elasticsearch.xpack.security.audit.AuditLevel; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java index 3a6cfa501935c..5da6a9eb77cdd 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -30,7 +30,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.support.Automatons; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.audit.AuditLevel; import org.elasticsearch.xpack.security.audit.AuditTrail; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java index 416acfca3abd2..c3888ba9453c9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java @@ -31,13 +31,15 @@ import org.elasticsearch.xpack.core.security.support.Exceptions; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.audit.AuditTrail; import org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.elasticsearch.xpack.security.authc.support.RealmUserLookup; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -381,33 +383,18 @@ private void consumeUser(User user, Map> message * names of users that exist using a timing attack */ private void lookupRunAsUser(final User user, String runAsUsername, Consumer userConsumer) { - final List realmsList = realms.asList(); - final BiConsumer> realmLookupConsumer = (realm, lookupUserListener) -> - realm.lookupUser(runAsUsername, ActionListener.wrap((lookedupUser) -> { - if (lookedupUser != null) { - lookedupBy = new RealmRef(realm.name(), realm.type(), nodeName); - lookupUserListener.onResponse(lookedupUser); - } else { - lookupUserListener.onResponse(null); 
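The run-as lookup in AuthenticationService is rewritten below on top of RealmUserLookup, keeping the existing behaviour that an unknown run-as user still yields a User object (rejected later by authorization) so that response timing does not reveal which usernames exist. A simplified synchronous sketch of that behaviour, with hypothetical Realm and User types:

import java.util.List;
import java.util.Optional;

// Hypothetical sketch: the lookup never reports "user does not exist" directly;
// an empty-role placeholder is returned instead and authorization rejects it later.
record User(String principal, String[] roles) {}

interface Realm {
    Optional<User> lookup(String principal);
}

final class RunAsLookupSketch {

    static User lookupRunAs(String runAsUsername, List<Realm> realms) {
        for (Realm realm : realms) {
            Optional<User> found = realm.lookup(runAsUsername);
            if (found.isPresent()) {
                return found.get();
            }
        }
        // unknown user: still build a User with no roles; authz will reject it later
        return new User(runAsUsername, new String[0]);
    }

    public static void main(String[] args) {
        Realm emptyRealm = principal -> Optional.empty();   // no realm knows the user
        System.out.println(lookupRunAs("unknown-user", List.of(emptyRealm)).principal());
    }
}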
- } - }, lookupUserListener::onFailure)); - - final IteratingActionListener userLookupListener = - new IteratingActionListener<>(ActionListener.wrap((lookupUser) -> { - if (lookupUser == null) { - // the user does not exist, but we still create a User object, which will later be rejected by authz - userConsumer.accept(new User(runAsUsername, null, user)); - } else { - userConsumer.accept(new User(lookupUser, user)); - } - }, - (e) -> listener.onFailure(request.exceptionProcessingRequest(e, authenticationToken))), - realmLookupConsumer, realmsList, threadContext); - try { - userLookupListener.run(); - } catch (Exception e) { - listener.onFailure(request.exceptionProcessingRequest(e, authenticationToken)); - } + final RealmUserLookup lookup = new RealmUserLookup(realms.asList(), threadContext); + lookup.lookup(runAsUsername, ActionListener.wrap(tuple -> { + if (tuple == null) { + // the user does not exist, but we still create a User object, which will later be rejected by authz + userConsumer.accept(new User(runAsUsername, null, user)); + } else { + User foundUser = Objects.requireNonNull(tuple.v1()); + Realm realm = Objects.requireNonNull(tuple.v2()); + lookedupBy = new RealmRef(realm.name(), realm.type(), nodeName); + userConsumer.accept(new User(foundUser, user)); + } + }, exception -> listener.onFailure(request.exceptionProcessingRequest(exception, authenticationToken)))); } /** diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java index 8b80c1f1d1ca9..d2573b9343d06 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java @@ -93,6 +93,7 @@ public Realms(Settings settings, Environment env, Map fac this.standardRealmsOnly = Collections.unmodifiableList(standardRealms); this.nativeRealmsOnly = Collections.unmodifiableList(nativeRealms); + realms.forEach(r -> r.initialize(this, licenseState)); } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 8b6dd8295d399..937bd22d98206 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -212,7 +212,8 @@ public static Boolean isTokenServiceEnabled(Settings settings) { * The created token will be stored in the security index. */ public void createUserToken(Authentication authentication, Authentication originatingClientAuth, - ActionListener> listener, Map metadata) throws IOException { + ActionListener> listener, Map metadata, + boolean includeRefreshToken) throws IOException { ensureEnabled(); if (authentication == null) { listener.onFailure(new IllegalArgumentException("authentication must be provided")); @@ -226,13 +227,14 @@ public void createUserToken(Authentication authentication, Authentication origin new Authentication(authentication.getUser(), authentication.getAuthenticatedBy(), authentication.getLookedUpBy(), version); final UserToken userToken = new UserToken(version, matchingVersionAuth, expiration, metadata); - final String refreshToken = UUIDs.randomBase64UUID(); + final String refreshToken = includeRefreshToken ? 
UUIDs.randomBase64UUID() : null; try (XContentBuilder builder = XContentFactory.jsonBuilder()) { builder.startObject(); builder.field("doc_type", "token"); builder.field("creation_time", created.toEpochMilli()); - builder.startObject("refresh_token") + if (includeRefreshToken) { + builder.startObject("refresh_token") .field("token", refreshToken) .field("invalidated", false) .field("refreshed", false) @@ -242,6 +244,7 @@ public void createUserToken(Authentication authentication, Authentication origin .field("realm", originatingClientAuth.getAuthenticatedBy().getName()) .endObject() .endObject(); + } builder.startObject("access_token") .field("invalidated", false) .field("user_token", userToken) @@ -734,7 +737,7 @@ private void innerRefresh(String tokenDocId, Authentication userAuth, ActionList .request(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, updateRequest, ActionListener.wrap( - updateResponse -> createUserToken(authentication, userAuth, listener, metadata), + updateResponse -> createUserToken(authentication, userAuth, listener, metadata, true), e -> { Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof VersionConflictEngineException || diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java index ffd9c3f73bcad..a84b76beab8bb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java @@ -11,7 +11,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.support.SecurityIndexManager; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java index 507ed4684a14e..d923a0298041b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java @@ -48,8 +48,8 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; -import org.elasticsearch.protocol.xpack.security.User.Fields; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.User.Fields; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.support.SecurityIndexManager; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java index 99c138bbb121d..3372667191106 100644 
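The TokenService change above threads an includeRefreshToken flag through createUserToken, so the refresh_token section of the stored token document is only written for flows that are allowed to refresh. A standalone sketch of that conditional document assembly, using plain maps as an illustrative stand-in for the XContentBuilder calls:

import java.time.Instant;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.UUID;

// Hypothetical sketch: assemble the token document as a map instead of XContentBuilder.
final class TokenDocumentSketch {

    static Map<String, Object> buildTokenDoc(String principal, boolean includeRefreshToken) {
        Map<String, Object> doc = new LinkedHashMap<>();
        doc.put("doc_type", "token");
        doc.put("creation_time", Instant.now().toEpochMilli());
        if (includeRefreshToken) {
            // the refresh_token object is only present when the caller asked for one
            Map<String, Object> refresh = new LinkedHashMap<>();
            refresh.put("token", UUID.randomUUID().toString());
            refresh.put("invalidated", false);
            refresh.put("refreshed", false);
            doc.put("refresh_token", refresh);
        }
        Map<String, Object> access = new LinkedHashMap<>();
        access.put("invalidated", false);
        access.put("user", principal);
        doc.put("access_token", access);
        return doc;
    }

    public static void main(String[] args) {
        System.out.println(buildTokenDoc("jdoe", false));
    }
}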
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java @@ -24,12 +24,13 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.support.Exceptions; +import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -149,6 +150,8 @@ private User getUser(String username, ReservedUserInfo userInfo) { return new LogstashSystemUser(userInfo.enabled); case BeatsSystemUser.NAME: return new BeatsSystemUser(userInfo.enabled); + case APMSystemUser.NAME: + return new APMSystemUser(userInfo.enabled); default: if (anonymousEnabled && anonymousUser.principal().equals(username)) { return anonymousUser; @@ -177,6 +180,9 @@ public void users(ActionListener> listener) { userInfo = reservedUserInfos.get(BeatsSystemUser.NAME); users.add(new BeatsSystemUser(userInfo == null || userInfo.enabled)); + userInfo = reservedUserInfos.get(APMSystemUser.NAME); + users.add(new APMSystemUser(userInfo == null || userInfo.enabled)); + if (anonymousEnabled) { users.add(anonymousUser); } @@ -226,12 +232,12 @@ private boolean userIsDefinedForCurrentSecurityMapping(String username) { private Version getDefinedVersion(String username) { switch (username) { - case LogstashSystemUser.NAME: - return LogstashSystemUser.DEFINED_SINCE; case BeatsSystemUser.NAME: return BeatsSystemUser.DEFINED_SINCE; + case APMSystemUser.NAME: + return APMSystemUser.DEFINED_SINCE; default: - return Version.V_5_0_0; + return Version.V_6_0_0; } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/UserAndPassword.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/UserAndPassword.java index d9971ab2388f8..3f636312f0f06 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/UserAndPassword.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/UserAndPassword.java @@ -7,7 +7,7 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.xpack.core.security.authc.support.Hasher; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; /** * Like User, but includes the hashed password diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java index 336acbdb18175..fad10c821c85d 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java @@ -27,6 +27,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.support.Validation; +import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; @@ -63,7 +64,8 @@ public class SetupPasswordTool extends LoggingAwareMultiCommand { private static final char[] CHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789").toCharArray(); - public static final List USERS = asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + public static final List USERS = asList(ElasticUser.NAME, APMSystemUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, + BeatsSystemUser.NAME); private final BiFunction clientFunction; private final CheckedFunction keyStoreFunction; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java index 8d529897534e5..e2586ea836dec 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java @@ -12,7 +12,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import java.util.Map; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java index 220108b563755..15a6c2c41daed 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java @@ -24,7 +24,7 @@ import org.elasticsearch.xpack.core.security.support.NoOpLogger; import org.elasticsearch.xpack.core.security.support.Validation; import org.elasticsearch.xpack.core.security.support.Validation.Users; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.support.SecurityFiles; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java index 53146203ee2f1..9c531d3159f1b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java @@ -13,14 
+13,16 @@ import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.ietf.jgss.GSSException; @@ -63,6 +65,7 @@ public final class KerberosRealm extends Realm implements CachingRealm { private final Path keytabPath; private final boolean enableKerberosDebug; private final boolean removeRealmName; + private DelegatedAuthorizationSupport delegatedRealms; public KerberosRealm(final RealmConfig config, final NativeRoleMappingStore nativeRoleMappingStore, final ThreadPool threadPool) { this(config, nativeRoleMappingStore, new KerberosTicketValidator(), threadPool, null); @@ -100,6 +103,15 @@ public KerberosRealm(final RealmConfig config, final NativeRoleMappingStore nati } this.enableKerberosDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings()); this.removeRealmName = KerberosRealmSettings.SETTING_REMOVE_REALM_NAME.get(config.settings()); + this.delegatedRealms = null; + } + + @Override + public void initialize(Iterable realms, XPackLicenseState licenseState) { + if (delegatedRealms != null) { + throw new IllegalStateException("Realm has already been initialized"); + } + delegatedRealms = new DelegatedAuthorizationSupport(realms, config, licenseState); } @Override @@ -133,13 +145,14 @@ public AuthenticationToken token(final ThreadContext context) { @Override public void authenticate(final AuthenticationToken token, final ActionListener listener) { + assert delegatedRealms != null : "Realm has not been initialized correctly"; assert token instanceof KerberosAuthenticationToken; final KerberosAuthenticationToken kerbAuthnToken = (KerberosAuthenticationToken) token; kerberosTicketValidator.validateTicket((byte[]) kerbAuthnToken.credentials(), keytabPath, enableKerberosDebug, ActionListener.wrap(userPrincipalNameOutToken -> { if (userPrincipalNameOutToken.v1() != null) { final String username = maybeRemoveRealmName(userPrincipalNameOutToken.v1()); - buildUser(username, userPrincipalNameOutToken.v2(), listener); + resolveUser(username, userPrincipalNameOutToken.v2(), listener); } else { /** * This is when security context could not be established may be due to ongoing @@ -192,35 +205,36 @@ private void handleException(Exception e, final ActionListener listener) { + private void resolveUser(final String username, final String outToken, final ActionListener listener) { // if outToken is present then it needs to be communicated with peer, add it to // response header in thread context. 
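The realm changes here (Kerberos above, and LDAP, PKI and SAML below) add an initialize(realms, licenseState) hook that wires up DelegatedAuthorizationSupport: when authorizing realms are configured, the realm only proves the principal and then resolves the user from the delegated realms instead of building it from role mappings. A compact sketch of that lookup-ordering idea with hypothetical interfaces, not the Elasticsearch Realm API:

import java.util.List;
import java.util.Optional;

// Hypothetical, simplified interface: a realm that can look up roles for a principal.
interface UserLookup {
    Optional<String[]> lookupRoles(String principal);
}

final class DelegatedAuthorizationSketch {

    private final List<UserLookup> delegatedRealms;

    DelegatedAuthorizationSketch(List<UserLookup> delegatedRealms) {
        this.delegatedRealms = delegatedRealms;
    }

    boolean hasDelegation() {
        return delegatedRealms.isEmpty() == false;
    }

    // Ask each configured realm in order; the first one that knows the user wins.
    Optional<String[]> resolve(String principal) {
        for (UserLookup realm : delegatedRealms) {
            Optional<String[]> roles = realm.lookupRoles(principal);
            if (roles.isPresent()) {
                return roles;
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        UserLookup lookupRealm = p -> "jdoe".equals(p) ? Optional.of(new String[] {"admin"}) : Optional.empty();
        DelegatedAuthorizationSketch delegated = new DelegatedAuthorizationSketch(List.of(lookupRealm));
        System.out.println(delegated.hasDelegation() + " " + delegated.resolve("jdoe").map(r -> r[0]).orElse("<none>"));
    }
}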
if (Strings.hasText(outToken)) { threadPool.getThreadContext().addResponseHeader(WWW_AUTHENTICATE, NEGOTIATE_AUTH_HEADER_PREFIX + outToken); } - final User user = (userPrincipalNameToUserCache != null) ? userPrincipalNameToUserCache.get(username) : null; - if (user != null) { - /** - * TODO: bizybot If authorizing realms configured, resolve user from those - * realms and then return. - */ - listener.onResponse(AuthenticationResult.success(user)); + + if (delegatedRealms.hasDelegation()) { + delegatedRealms.resolve(username, listener); } else { - /** - * TODO: bizybot If authorizing realms configured, resolve user from those - * realms, cache it and then return. - */ - final UserRoleMapper.UserData userData = new UserRoleMapper.UserData(username, null, Collections.emptySet(), null, this.config); - userRoleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { - final User computedUser = new User(username, roles.toArray(new String[roles.size()]), null, null, null, true); - if (userPrincipalNameToUserCache != null) { - userPrincipalNameToUserCache.put(username, computedUser); - } - listener.onResponse(AuthenticationResult.success(computedUser)); - }, listener::onFailure)); + final User user = (userPrincipalNameToUserCache != null) ? userPrincipalNameToUserCache.get(username) : null; + if (user != null) { + listener.onResponse(AuthenticationResult.success(user)); + } else { + buildUser(username, listener); + } } } + private void buildUser(final String username, final ActionListener listener) { + final UserRoleMapper.UserData userData = new UserRoleMapper.UserData(username, null, Collections.emptySet(), null, this.config); + userRoleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { + final User computedUser = new User(username, roles.toArray(new String[roles.size()]), null, null, null, true); + if (userPrincipalNameToUserCache != null) { + userPrincipalNameToUserCache.put(username, computedUser); + } + listener.onResponse(AuthenticationResult.success(computedUser)); + }, listener::onFailure)); + } + @Override public void lookupUser(final String username, final ActionListener listener) { listener.onResponse(null); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java index d175e1b229312..8107d7488188b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java @@ -32,7 +32,7 @@ import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.ldap.support.LdapMetaDataResolver; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java index f689bc2878903..193b33b7d8fe9 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java @@ -8,7 +8,6 @@ import com.unboundid.ldap.sdk.LDAPException; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; @@ -16,21 +15,25 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.ldap.support.LdapLoadBalancing; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; +import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData; import org.elasticsearch.xpack.security.authc.support.mapper.CompositeRoleMapper; @@ -53,7 +56,7 @@ public final class LdapRealm extends CachingUsernamePasswordRealm { private final UserRoleMapper roleMapper; private final ThreadPool threadPool; private final TimeValue executionTimeout; - + private DelegatedAuthorizationSupport delegatedRealms; public LdapRealm(String type, RealmConfig config, SSLService sslService, ResourceWatcherService watcherService, @@ -118,6 +121,7 @@ static SessionFactory sessionFactory(RealmConfig config, SSLService sslService, */ @Override protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + assert delegatedRealms != null : "Realm has not been initialized correctly"; // we submit to the threadpool because authentication using LDAP will execute blocking I/O for a bind request and we don't want // network threads stuck waiting for a socket to connect. 
After the bind, then all interaction with LDAP should be async final CancellableLdapRunnable cancellableLdapRunnable = new CancellableLdapRunnable<>(listener, @@ -159,6 +163,14 @@ private ContextPreservingActionListener contextPreservingListener(L sessionListener); } + @Override + public void initialize(Iterable realms, XPackLicenseState licenseState) { + if (delegatedRealms != null) { + throw new IllegalStateException("Realm has already been initialized"); + } + delegatedRealms = new DelegatedAuthorizationSupport(realms, config, licenseState); + } + @Override public void usageStats(ActionListener> listener) { super.usageStats(ActionListener.wrap(usage -> { @@ -171,39 +183,56 @@ public void usageStats(ActionListener> listener) { } private static void buildUser(LdapSession session, String username, ActionListener listener, - UserRoleMapper roleMapper) { + UserRoleMapper roleMapper, DelegatedAuthorizationSupport delegatedAuthz) { + assert delegatedAuthz != null : "DelegatedAuthorizationSupport is null"; if (session == null) { listener.onResponse(AuthenticationResult.notHandled()); + } else if (delegatedAuthz.hasDelegation()) { + delegatedAuthz.resolve(username, listener); } else { - boolean loadingGroups = false; - try { - final Consumer onFailure = e -> { - IOUtils.closeWhileHandlingException(session); - listener.onFailure(e); - }; - session.resolve(ActionListener.wrap((ldapData) -> { - final Map metadata = MapBuilder.newMapBuilder() - .put("ldap_dn", session.userDn()) - .put("ldap_groups", ldapData.groups) - .putAll(ldapData.metaData) - .map(); - final UserData user = new UserData(username, session.userDn(), ldapData.groups, - metadata, session.realm()); - roleMapper.resolveRoles(user, ActionListener.wrap( - roles -> { - IOUtils.close(session); - String[] rolesArray = roles.toArray(new String[roles.size()]); - listener.onResponse(AuthenticationResult.success( - new User(username, rolesArray, null, null, metadata, true)) - ); - }, onFailure - )); - }, onFailure)); - loadingGroups = true; - } finally { - if (loadingGroups == false) { - session.close(); - } + lookupUserFromSession(username, session, roleMapper, listener); + } + } + + @Override + protected void handleCachedAuthentication(User user, ActionListener listener) { + if (delegatedRealms.hasDelegation()) { + delegatedRealms.resolve(user.principal(), listener); + } else { + super.handleCachedAuthentication(user, listener); + } + } + + private static void lookupUserFromSession(String username, LdapSession session, UserRoleMapper roleMapper, + ActionListener listener) { + boolean loadingGroups = false; + try { + final Consumer onFailure = e -> { + IOUtils.closeWhileHandlingException(session); + listener.onFailure(e); + }; + session.resolve(ActionListener.wrap((ldapData) -> { + final Map metadata = MapBuilder.newMapBuilder() + .put("ldap_dn", session.userDn()) + .put("ldap_groups", ldapData.groups) + .putAll(ldapData.metaData) + .map(); + final UserData user = new UserData(username, session.userDn(), ldapData.groups, + metadata, session.realm()); + roleMapper.resolveRoles(user, ActionListener.wrap( + roles -> { + IOUtils.close(session); + String[] rolesArray = roles.toArray(new String[roles.size()]); + listener.onResponse(AuthenticationResult.success( + new User(username, rolesArray, null, null, metadata, true)) + ); + }, onFailure + )); + }, onFailure)); + loadingGroups = true; + } finally { + if (loadingGroups == false) { + session.close(); } } } @@ -233,7 +262,7 @@ public void onResponse(LdapSession session) { 
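The comment above explains why LDAP authentication is submitted to a thread pool: the bind performs blocking I/O and must not stall network threads. A small JDK-only sketch of that hand-off, with the sleep standing in for the blocking bind and all names being illustrative:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch: hand blocking authentication work to a dedicated pool
// so the calling (network) thread is never blocked on I/O.
final class BlockingAuthOffloadSketch {

    private final ExecutorService authPool = Executors.newFixedThreadPool(4);

    CompletableFuture<Boolean> authenticate(String username, char[] password) {
        return CompletableFuture.supplyAsync(() -> {
            // stand-in for the blocking LDAP bind and group resolution
            try {
                TimeUnit.MILLISECONDS.sleep(50);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return false;
            }
            return password.length > 0;
        }, authPool);
    }

    public static void main(String[] args) throws Exception {
        BlockingAuthOffloadSketch sketch = new BlockingAuthOffloadSketch();
        // the caller attaches to the future instead of waiting on the network thread
        System.out.println(sketch.authenticate("jdoe", "secret".toCharArray()).get(1, TimeUnit.SECONDS));
        sketch.authPool.shutdown();
    }
}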
resultListener.onResponse(AuthenticationResult.notHandled()); } else { ldapSessionAtomicReference.set(session); - buildUser(session, username, resultListener, roleMapper); + buildUser(session, username, resultListener, roleMapper, delegatedRealms); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java index 36d14aa67c0de..70b2f0015cf7a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.ldap.support.LdapMetaDataResolver; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java index 2ec87888d8c13..a3541ec2759b3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java @@ -24,7 +24,7 @@ import org.elasticsearch.xpack.core.security.authc.ldap.LdapUserSearchSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java index 367bd525036e2..986fa1900e7c8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java @@ -25,7 +25,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.ldap.support.LdapMetaDataResolver; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java index 58e10a5475571..4d13f332ffe20 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; @@ -26,17 +27,17 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.security.authc.BytesKey; import org.elasticsearch.xpack.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.CompositeRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import javax.net.ssl.X509TrustManager; - import java.security.MessageDigest; import java.security.cert.Certificate; import java.security.cert.CertificateEncodingException; @@ -75,6 +76,7 @@ public class PkiRealm extends Realm implements CachingRealm { private final Pattern principalPattern; private final UserRoleMapper roleMapper; private final Cache cache; + private DelegatedAuthorizationSupport delegatedRealms; public PkiRealm(RealmConfig config, ResourceWatcherService watcherService, NativeRoleMappingStore nativeRoleMappingStore) { this(config, new CompositeRoleMapper(PkiRealmSettings.TYPE, config, watcherService, nativeRoleMappingStore)); @@ -91,6 +93,15 @@ public PkiRealm(RealmConfig config, ResourceWatcherService watcherService, Nativ .setExpireAfterWrite(PkiRealmSettings.CACHE_TTL_SETTING.get(config.settings())) .setMaximumWeight(PkiRealmSettings.CACHE_MAX_USERS_SETTING.get(config.settings())) .build(); + this.delegatedRealms = null; + } + + @Override + public void initialize(Iterable realms, XPackLicenseState licenseState) { + if (delegatedRealms != null) { + throw new IllegalStateException("Realm has already been initialized"); + } + delegatedRealms = new DelegatedAuthorizationSupport(realms, config, licenseState); } @Override @@ -105,32 +116,50 @@ public X509AuthenticationToken token(ThreadContext context) { @Override public void authenticate(AuthenticationToken authToken, ActionListener listener) { + assert delegatedRealms != null : "Realm has not been initialized correctly"; X509AuthenticationToken token = (X509AuthenticationToken)authToken; try { final BytesKey fingerprint = computeFingerprint(token.credentials()[0]); User user = cache.get(fingerprint); if (user != null) { - 
listener.onResponse(AuthenticationResult.success(user)); + if (delegatedRealms.hasDelegation()) { + delegatedRealms.resolve(token.principal(), listener); + } else { + listener.onResponse(AuthenticationResult.success(user)); + } } else if (isCertificateChainTrusted(trustManager, token, logger) == false) { listener.onResponse(AuthenticationResult.unsuccessful("Certificate for " + token.dn() + " is not trusted", null)); } else { - final Map metadata = Collections.singletonMap("pki_dn", token.dn()); - final UserRoleMapper.UserData userData = new UserRoleMapper.UserData(token.principal(), - token.dn(), Collections.emptySet(), metadata, this.config); - roleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { - final User computedUser = - new User(token.principal(), roles.toArray(new String[roles.size()]), null, null, metadata, true); - try (ReleasableLock ignored = readLock.acquire()) { - cache.put(fingerprint, computedUser); + final ActionListener cachingListener = ActionListener.wrap(result -> { + if (result.isAuthenticated()) { + try (ReleasableLock ignored = readLock.acquire()) { + cache.put(fingerprint, result.getUser()); + } } - listener.onResponse(AuthenticationResult.success(computedUser)); - }, listener::onFailure)); + listener.onResponse(result); + }, listener::onFailure); + if (delegatedRealms.hasDelegation()) { + delegatedRealms.resolve(token.principal(), cachingListener); + } else { + this.buildUser(token, cachingListener); + } } } catch (CertificateEncodingException e) { listener.onResponse(AuthenticationResult.unsuccessful("Certificate for " + token.dn() + " has encoding issues", e)); } } + private void buildUser(X509AuthenticationToken token, ActionListener listener) { + final Map metadata = Collections.singletonMap("pki_dn", token.dn()); + final UserRoleMapper.UserData userData = new UserRoleMapper.UserData(token.principal(), + token.dn(), Collections.emptySet(), metadata, this.config); + roleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { + final User computedUser = + new User(token.principal(), roles.toArray(new String[roles.size()]), null, null, metadata, true); + listener.onResponse(AuthenticationResult.success(computedUser)); + }, listener::onFailure)); + } + @Override public void lookupUser(String username, ActionListener listener) { listener.onResponse(null); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java index a8f50d975e8d7..4a9db7c5d612a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.watcher.FileChangesListener; import org.elasticsearch.watcher.FileWatcher; import org.elasticsearch.watcher.ResourceWatcherService; @@ -43,13 +44,15 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import 
org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; +import org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.X509KeyPairSettings; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; +import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.opensaml.core.criterion.EntityIdCriterion; import org.opensaml.saml.common.xml.SAMLConstants; @@ -117,6 +120,7 @@ import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.NAME_ATTRIBUTE; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.POPULATE_USER_METADATA; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.PRINCIPAL_ATTRIBUTE; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.REQUESTED_AUTHN_CONTEXT_CLASS_REF; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SIGNING_KEY_ALIAS; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SIGNING_MESSAGE_TYPES; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SIGNING_SETTINGS; @@ -124,7 +128,6 @@ import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SP_ENTITY_ID; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SP_LOGOUT; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.TYPE; -import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.REQUESTED_AUTHN_CONTEXT_CLASS_REF; /** * This class is {@link Releasable} because it uses a library that thinks timers and timer tasks @@ -166,6 +169,7 @@ public final class SamlRealm extends Realm implements Releasable { private final AttributeParser nameAttribute; private final AttributeParser mailAttribute; + private DelegatedAuthorizationSupport delegatedRealms; /** * Factory for SAML realm. 
@@ -231,6 +235,14 @@ public static SamlRealm create(RealmConfig config, SSLService sslService, Resour this.releasables = new ArrayList<>(); } + @Override + public void initialize(Iterable realms, XPackLicenseState licenseState) { + if (delegatedRealms != null) { + throw new IllegalStateException("Realm has already been initialized"); + } + delegatedRealms = new DelegatedAuthorizationSupport(realms, config, licenseState); + } + static String require(RealmConfig config, Setting setting) { final String value = setting.get(config.settings()); if (value.isEmpty()) { @@ -402,14 +414,27 @@ public void authenticate(AuthenticationToken authenticationToken, ActionListener } } - private void buildUser(SamlAttributes attributes, ActionListener listener) { + private void buildUser(SamlAttributes attributes, ActionListener baseListener) { final String principal = resolveSingleValueAttribute(attributes, principalAttribute, PRINCIPAL_ATTRIBUTE.name()); if (Strings.isNullOrEmpty(principal)) { - listener.onResponse(AuthenticationResult.unsuccessful( + baseListener.onResponse(AuthenticationResult.unsuccessful( principalAttribute + " not found in " + attributes.attributes(), null)); return; } + final Map tokenMetadata = createTokenMetadata(attributes.name(), attributes.session()); + ActionListener wrappedListener = ActionListener.wrap(auth -> { + if (auth.isAuthenticated()) { + config.threadContext().putTransient(CONTEXT_TOKEN_DATA, tokenMetadata); + } + baseListener.onResponse(auth); + }, baseListener::onFailure); + + if (delegatedRealms.hasDelegation()) { + delegatedRealms.resolve(principal, wrappedListener); + return; + } + final Map userMeta = new HashMap<>(); if (populateUserMetadata) { for (SamlAttributes.SamlAttribute a : attributes.attributes()) { @@ -424,7 +449,6 @@ private void buildUser(SamlAttributes attributes, ActionListener tokenMetadata = createTokenMetadata(attributes.name(), attributes.session()); final List groups = groupsAttribute.getAttribute(attributes); final String dn = resolveSingleValueAttribute(attributes, dnAttribute, DN_ATTRIBUTE.name()); @@ -433,9 +457,8 @@ private void buildUser(SamlAttributes attributes, ActionListener { final User user = new User(principal, roles.toArray(new String[roles.size()]), name, mail, userMeta, true); - config.threadContext().putTransient(CONTEXT_TOKEN_DATA, tokenMetadata); - listener.onResponse(AuthenticationResult.success(user)); - }, listener::onFailure)); + wrappedListener.onResponse(AuthenticationResult.success(user)); + }, wrappedListener::onFailure)); } public Map createTokenMetadata(SamlNameId nameId, String session) { @@ -745,10 +768,10 @@ static AttributeParser forSetting(Logger logger, SamlRealmSettings.AttributeSett attributes -> attributes.getAttributeValues(attributeName)); } } else if (required) { - throw new SettingsException("Setting" + RealmSettings.getFullSettingKey(realmConfig, setting.getAttribute()) + throw new SettingsException("Setting " + RealmSettings.getFullSettingKey(realmConfig, setting.getAttribute()) + " is required"); } else if (setting.getPattern().exists(settings)) { - throw new SettingsException("Setting" + RealmSettings.getFullSettingKey(realmConfig, setting.getPattern()) + throw new SettingsException("Setting " + RealmSettings.getFullSettingKey(realmConfig, setting.getPattern()) + " cannot be set unless " + RealmSettings.getFullSettingKey(realmConfig, setting.getAttribute()) + " is also set"); } else { return new AttributeParser("No SAML attribute for [" + setting.name() + "]", attributes -> 
Collections.emptyList()); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java index bcdbc1e1dd305..af93a180072aa 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java @@ -5,11 +5,9 @@ */ package org.elasticsearch.xpack.security.authc.support; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ListenableFuture; @@ -20,7 +18,7 @@ import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.util.Collections; import java.util.Map; @@ -30,7 +28,7 @@ public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm implements CachingRealm { - private final Cache>> cache; + private final Cache> cache; private final ThreadPool threadPool; final Hasher cacheHasher; @@ -38,17 +36,18 @@ protected CachingUsernamePasswordRealm(String type, RealmConfig config, ThreadPo super(type, config); cacheHasher = Hasher.resolve(CachingUsernamePasswordRealmSettings.CACHE_HASH_ALGO_SETTING.get(config.settings())); this.threadPool = threadPool; - TimeValue ttl = CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.get(config.settings()); + final TimeValue ttl = CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.get(config.settings()); if (ttl.getNanos() > 0) { - cache = CacheBuilder.>>builder() - .setExpireAfterWrite(ttl) - .setMaximumWeight(CachingUsernamePasswordRealmSettings.CACHE_MAX_USERS_SETTING.get(config.settings())) - .build(); + cache = CacheBuilder.>builder() + .setExpireAfterWrite(ttl) + .setMaximumWeight(CachingUsernamePasswordRealmSettings.CACHE_MAX_USERS_SETTING.get(config.settings())) + .build(); } else { cache = null; } } + @Override public final void expire(String username) { if (cache != null) { logger.trace("invalidating cache for user [{}] in realm [{}]", username, name()); @@ -56,6 +55,7 @@ public final void expire(String username) { } } + @Override public final void expireAll() { if (cache != null) { logger.trace("invalidating cache for all users in realm [{}]", name()); @@ -72,109 +72,101 @@ public final void expireAll() { */ @Override public final void authenticate(AuthenticationToken authToken, ActionListener listener) { - UsernamePasswordToken token = (UsernamePasswordToken) authToken; + final UsernamePasswordToken token = (UsernamePasswordToken) authToken; try { if (cache == null) { doAuthenticate(token, listener); } else { authenticateWithCache(token, listener); } - } catch (Exception e) { + } catch (final Exception e) { // each realm should handle exceptions, if we get one here it should be considered fatal listener.onFailure(e); } } + /** + * 
This validates the {@code token} while making sure there is only one inflight + * request to the authentication source. Only successful responses are cached + * and any subsequent requests, bearing the same password, will succeed + * without reaching to the authentication source. A different password in a + * subsequent request, however, will clear the cache and try to reach to + * the authentication source. + * + * @param token The authentication token + * @param listener to be called at completion + */ private void authenticateWithCache(UsernamePasswordToken token, ActionListener listener) { try { - final SetOnce authenticatedUser = new SetOnce<>(); - final AtomicBoolean createdAndStartedFuture = new AtomicBoolean(false); - final ListenableFuture> future = cache.computeIfAbsent(token.principal(), k -> { - final ListenableFuture> created = new ListenableFuture<>(); - if (createdAndStartedFuture.compareAndSet(false, true) == false) { - throw new IllegalStateException("something else already started this. how?"); - } - return created; + final AtomicBoolean authenticationInCache = new AtomicBoolean(true); + final ListenableFuture listenableCacheEntry = cache.computeIfAbsent(token.principal(), k -> { + authenticationInCache.set(false); + return new ListenableFuture<>(); }); - - if (createdAndStartedFuture.get()) { - doAuthenticate(token, ActionListener.wrap(result -> { - if (result.isAuthenticated()) { - final User user = result.getUser(); - authenticatedUser.set(user); - final UserWithHash userWithHash = new UserWithHash(user, token.credentials(), cacheHasher); - future.onResponse(new Tuple<>(result, userWithHash)); + if (authenticationInCache.get()) { + // there is a cached or an inflight authenticate request + listenableCacheEntry.addListener(ActionListener.wrap(authenticatedUserWithHash -> { + if (authenticatedUserWithHash != null && authenticatedUserWithHash.verify(token.credentials())) { + // cached credential hash matches the credential hash for this forestalled request + handleCachedAuthentication(authenticatedUserWithHash.user, ActionListener.wrap(cacheResult -> { + if (cacheResult.isAuthenticated()) { + logger.debug("realm [{}] authenticated user [{}], with roles [{}]", + name(), token.principal(), cacheResult.getUser().roles()); + } else { + logger.debug("realm [{}] authenticated user [{}] from cache, but then failed [{}]", + name(), token.principal(), cacheResult.getMessage()); + } + listener.onResponse(cacheResult); + }, listener::onFailure)); } else { - future.onResponse(new Tuple<>(result, null)); - } - }, future::onFailure)); - } - - future.addListener(ActionListener.wrap(tuple -> { - if (tuple != null) { - final UserWithHash userWithHash = tuple.v2(); - final boolean performedAuthentication = createdAndStartedFuture.get() && userWithHash != null && - tuple.v2().user == authenticatedUser.get(); - handleResult(future, createdAndStartedFuture.get(), performedAuthentication, token, tuple, listener); - } else { - handleFailure(future, createdAndStartedFuture.get(), token, new IllegalStateException("unknown error authenticating"), - listener); - } - }, e -> handleFailure(future, createdAndStartedFuture.get(), token, e, listener)), - threadPool.executor(ThreadPool.Names.GENERIC)); - } catch (ExecutionException e) { - listener.onResponse(AuthenticationResult.unsuccessful("", e)); - } - } - - private void handleResult(ListenableFuture> future, boolean createdAndStartedFuture, - boolean performedAuthentication, UsernamePasswordToken token, - Tuple result, ActionListener listener) { - 
final AuthenticationResult authResult = result.v1(); - if (authResult == null) { - // this was from a lookup; clear and redo - cache.invalidate(token.principal(), future); - authenticateWithCache(token, listener); - } else if (authResult.isAuthenticated()) { - if (performedAuthentication) { - listener.onResponse(authResult); - } else { - UserWithHash userWithHash = result.v2(); - if (userWithHash.verify(token.credentials())) { - if (userWithHash.user.enabled()) { - User user = userWithHash.user; - logger.debug("realm [{}] authenticated user [{}], with roles [{}]", - name(), token.principal(), user.roles()); - listener.onResponse(AuthenticationResult.success(user)); - } else { - // re-auth to see if user has been enabled - cache.invalidate(token.principal(), future); + // The inflight request has failed or its credential hash does not match the + // hash of the credential for this forestalled request. + // clear cache and try to reach the authentication source again because password + // might have changed there and the local cached hash got stale + cache.invalidate(token.principal(), listenableCacheEntry); authenticateWithCache(token, listener); } - } else { - // could be a password change? - cache.invalidate(token.principal(), future); + }, e -> { + // the inflight request failed, so try again, but first (always) make sure cache + // is cleared of the failed authentication + cache.invalidate(token.principal(), listenableCacheEntry); authenticateWithCache(token, listener); - } - } - } else { - cache.invalidate(token.principal(), future); - if (createdAndStartedFuture) { - listener.onResponse(authResult); + }), threadPool.executor(ThreadPool.Names.GENERIC)); } else { - authenticateWithCache(token, listener); + // attempt authentication against the authentication source + doAuthenticate(token, ActionListener.wrap(authResult -> { + if (authResult.isAuthenticated() && authResult.getUser().enabled()) { + // compute the credential hash of this successful authentication request + final UserWithHash userWithHash = new UserWithHash(authResult.getUser(), token.credentials(), cacheHasher); + // notify any forestalled request listeners; they will not reach to the + // authentication request and instead will use this hash for comparison + listenableCacheEntry.onResponse(userWithHash); + } else { + // notify any forestalled request listeners; they will retry the request + listenableCacheEntry.onResponse(null); + } + // notify the listener of the inflight authentication request; this request is not retried + listener.onResponse(authResult); + }, e -> { + // notify any staved off listeners; they will retry the request + listenableCacheEntry.onFailure(e); + // notify the listener of the inflight authentication request; this request is not retried + listener.onFailure(e); + })); } + } catch (final ExecutionException e) { + listener.onFailure(e); } } - private void handleFailure(ListenableFuture> future, boolean createdAndStarted, - UsernamePasswordToken token, Exception e, ActionListener listener) { - cache.invalidate(token.principal(), future); - if (createdAndStarted) { - listener.onFailure(e); - } else { - authenticateWithCache(token, listener); - } + /** + * {@code handleCachedAuthentication} is called when a {@link User} is retrieved from the cache. + * The first {@code user} parameter is the user object that was found in the cache. 
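To make the single-inflight scheme that the new `authenticateWithCache` relies on easier to follow: the first request for a principal installs a `ListenableFuture` entry in the cache and performs the real authentication, concurrent ("forestalled") requests only register listeners on that entry, and any failure or credential mismatch invalidates exactly that entry so the next request goes back to the authentication source. Below is a minimal JDK-only sketch of the same idea, assuming a blocking loader for brevity; `SingleFlightCache` and its method names are illustrative inventions, not part of this change or of the Elasticsearch cache API.

```java
// Minimal sketch of the single-inflight caching idea using only JDK types.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;

final class SingleFlightCache<K, V> {

    private final ConcurrentMap<K, CompletableFuture<V>> cache = new ConcurrentHashMap<>();

    /** Returns a shared future; only the caller that created the entry invokes the loader. */
    CompletableFuture<V> get(K key, Function<K, V> loader) {
        final AtomicBoolean alreadyPresent = new AtomicBoolean(true);
        final CompletableFuture<V> entry = cache.computeIfAbsent(key, k -> {
            alreadyPresent.set(false); // this caller created the entry and must do the real work
            return new CompletableFuture<>();
        });
        if (alreadyPresent.get() == false) {
            try {
                entry.complete(loader.apply(key)); // the one request that reaches the source
            } catch (RuntimeException e) {
                cache.remove(key, entry);          // failed entries must not be served from cache
                entry.completeExceptionally(e);    // forestalled callers observe the failure and retry
            }
        }
        return entry;
    }

    /** Removes the entry only if it is still the observed one, mirroring cache.invalidate(key, value). */
    void invalidate(K key, CompletableFuture<V> entry) {
        cache.remove(key, entry);
    }
}
```

As in the realm code above, correctness hinges on removing the exact failed entry (`remove(key, entry)`) rather than whatever is currently cached, so a freshly installed entry from a concurrent request is never discarded by mistake.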
+ * The default implementation returns a {@link AuthenticationResult#success(User) success result} with the + * provided user, but sub-classes can return a different {@code User} object, or an unsuccessful result. + */ + protected void handleCachedAuthentication(User user, ActionListener listener) { + listener.onResponse(AuthenticationResult.success(user)); } @Override @@ -193,38 +185,57 @@ protected int getCacheSize() { @Override public final void lookupUser(String username, ActionListener listener) { - if (cache != null) { - try { - ListenableFuture> future = cache.computeIfAbsent(username, key -> { - ListenableFuture> created = new ListenableFuture<>(); - doLookupUser(username, ActionListener.wrap(user -> { - if (user != null) { - UserWithHash userWithHash = new UserWithHash(user, null, null); - created.onResponse(new Tuple<>(null, userWithHash)); - } else { - created.onResponse(new Tuple<>(null, null)); - } - }, created::onFailure)); - return created; - }); - - future.addListener(ActionListener.wrap(tuple -> { - if (tuple != null) { - if (tuple.v2() == null) { - cache.invalidate(username, future); - listener.onResponse(null); - } else { - listener.onResponse(tuple.v2().user); - } + try { + if (cache == null) { + doLookupUser(username, listener); + } else { + lookupWithCache(username, listener); + } + } catch (final Exception e) { + // each realm should handle exceptions, if we get one here it should be + // considered fatal + listener.onFailure(e); + } + } + + private void lookupWithCache(String username, ActionListener listener) { + try { + final AtomicBoolean lookupInCache = new AtomicBoolean(true); + final ListenableFuture listenableCacheEntry = cache.computeIfAbsent(username, key -> { + lookupInCache.set(false); + return new ListenableFuture<>(); + }); + if (false == lookupInCache.get()) { + // attempt lookup against the user directory + doLookupUser(username, ActionListener.wrap(user -> { + if (user != null) { + // user found + final UserWithHash userWithHash = new UserWithHash(user, null, null); + // notify forestalled request listeners + listenableCacheEntry.onResponse(userWithHash); } else { - listener.onResponse(null); + // user not found, invalidate cache so that subsequent requests are forwarded to + // the user directory + cache.invalidate(username, listenableCacheEntry); + // notify forestalled request listeners + listenableCacheEntry.onResponse(null); } - }, listener::onFailure), threadPool.executor(ThreadPool.Names.GENERIC)); - } catch (ExecutionException e) { - listener.onFailure(e); + }, e -> { + // the next request should be forwarded, not halted by a failed lookup attempt + cache.invalidate(username, listenableCacheEntry); + // notify forestalled listeners + listenableCacheEntry.onFailure(e); + })); } - } else { - doLookupUser(username, listener); + listenableCacheEntry.addListener(ActionListener.wrap(userWithHash -> { + if (userWithHash != null) { + listener.onResponse(userWithHash.user); + } else { + listener.onResponse(null); + } + }, listener::onFailure), threadPool.executor(ThreadPool.Names.GENERIC)); + } catch (final ExecutionException e) { + listener.onFailure(e); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupport.java new file mode 100644 index 0000000000000..ff6fc6042e7e8 --- /dev/null +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupport.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.security.authc.support; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.Strings.collectionToDelimitedString; + +/** + * Utility class for supporting "delegated authorization" (aka "authorization_realms", aka "lookup realms"). + * A {@link Realm} may support delegating authorization to another realm. It does this by registering a + * setting for {@link DelegatedAuthorizationSettings#AUTHZ_REALMS}, and constructing an instance of this + * class. Then, after the realm has performed any authentication steps, if {@link #hasDelegation()} is + * {@code true}, it delegates the construction of the {@link User} object and {@link AuthenticationResult} + * to {@link #resolve(String, ActionListener)}. + */ +public class DelegatedAuthorizationSupport { + + private final RealmUserLookup lookup; + private final Logger logger; + private final XPackLicenseState licenseState; + + /** + * Resolves the {@link DelegatedAuthorizationSettings#AUTHZ_REALMS} setting from {@code config} and calls + * {@link #DelegatedAuthorizationSupport(Iterable, List, Settings, ThreadContext, XPackLicenseState)} + */ + public DelegatedAuthorizationSupport(Iterable allRealms, RealmConfig config, XPackLicenseState licenseState) { + this(allRealms, DelegatedAuthorizationSettings.AUTHZ_REALMS.get(config.settings()), config.globalSettings(), config.threadContext(), + licenseState); + } + + /** + * Constructs a new object that delegates to the named realms ({@code lookupRealms}), which must exist within + * {@code allRealms}. 
+ * @throws IllegalArgumentException if one of the specified realms does not exist + */ + protected DelegatedAuthorizationSupport(Iterable allRealms, List lookupRealms, Settings settings, + ThreadContext threadContext, XPackLicenseState licenseState) { + final List resolvedLookupRealms = resolveRealms(allRealms, lookupRealms); + checkForRealmChains(resolvedLookupRealms, settings); + this.lookup = new RealmUserLookup(resolvedLookupRealms, threadContext); + this.logger = Loggers.getLogger(getClass()); + this.licenseState = licenseState; + } + + /** + * Are there any realms configured for delegated lookup + */ + public boolean hasDelegation() { + return this.lookup.hasRealms(); + } + + /** + * Attempts to find the user specified by {@code username} in one of the delegated realms. + * The realms are searched in the order specified during construction. + * Returns a {@link AuthenticationResult#success(User) successful result} if a {@link User} + * was found, otherwise returns an + * {@link AuthenticationResult#unsuccessful(String, Exception) unsuccessful result} + * with a meaningful diagnostic message. + */ + public void resolve(String username, ActionListener resultListener) { + if (licenseState.isAuthorizationRealmAllowed() == false) { + resultListener.onResponse(AuthenticationResult.unsuccessful( + DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey() + " are not permitted", + LicenseUtils.newComplianceException(DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey()) + )); + return; + } + if (hasDelegation() == false) { + resultListener.onResponse(AuthenticationResult.unsuccessful( + "No [" + DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey() + "] have been configured", null)); + return; + } + ActionListener> userListener = ActionListener.wrap(tuple -> { + if (tuple != null) { + logger.trace("Found user " + tuple.v1() + " in realm " + tuple.v2()); + resultListener.onResponse(AuthenticationResult.success(tuple.v1())); + } else { + resultListener.onResponse(AuthenticationResult.unsuccessful("the principal [" + username + + "] was authenticated, but no user could be found in realms [" + collectionToDelimitedString(lookup.getRealms(), ",") + + "]", null)); + } + }, resultListener::onFailure); + lookup.lookup(username, userListener); + } + + private List resolveRealms(Iterable allRealms, List lookupRealms) { + final List result = new ArrayList<>(lookupRealms.size()); + for (String name : lookupRealms) { + result.add(findRealm(name, allRealms)); + } + assert result.size() == lookupRealms.size(); + return result; + } + + /** + * Checks for (and rejects) chains of delegation in the provided realms. + * A chain occurs when "realmA" delegates authorization to "realmB", and realmB also delegates authorization (to any realm). + * Since "realmB" does not handle its own authorization, it is not a valid target for delegated authorization. + * @param delegatedRealms The list of realms that are going to be used for authorization. It is an error if any of these realms are + * also configured to delegate their authorization. 
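To make the intended call pattern of `DelegatedAuthorizationSupport` concrete, the fragment below sketches how a realm that supports `authorization_realms` might use it once it has verified the user's credentials. The surrounding class, method name and parameters are hypothetical; only `hasDelegation()`, `resolve(String, ActionListener)` and `AuthenticationResult.success(User)` come from this diff.

```java
// Hypothetical realm fragment, not part of this change.
package org.elasticsearch.xpack.security.authc.support;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
import org.elasticsearch.xpack.core.security.user.User;

class DelegatingRealmExample {

    private final DelegatedAuthorizationSupport delegatedRealms; // built from this realm's RealmConfig

    DelegatingRealmExample(DelegatedAuthorizationSupport delegatedRealms) {
        this.delegatedRealms = delegatedRealms;
    }

    /** Called after this realm has verified the credentials in {@code token}. */
    void buildResult(UsernamePasswordToken token, User localUser, ActionListener<AuthenticationResult> listener) {
        if (delegatedRealms.hasDelegation()) {
            // authorization_realms is configured: roles and metadata come from the lookup realms
            delegatedRealms.resolve(token.principal(), listener);
        } else {
            // no delegation: the realm supplies the User object it built itself
            listener.onResponse(AuthenticationResult.success(localUser));
        }
    }
}
```

The split keeps authentication and authorization separable: the authenticating realm proves who the user is, while the configured lookup realms decide what the resulting `User` (roles, metadata) looks like.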
+ * @throws IllegalArgumentException if a chain is detected + */ + private void checkForRealmChains(Iterable delegatedRealms, Settings globalSettings) { + final Map settingsByRealm = RealmSettings.getRealmSettings(globalSettings); + for (Realm realm : delegatedRealms) { + final Settings realmSettings = settingsByRealm.get(realm.name()); + if (realmSettings != null && DelegatedAuthorizationSettings.AUTHZ_REALMS.exists(realmSettings)) { + throw new IllegalArgumentException("cannot use realm [" + realm + + "] as an authorization realm - it is already delegating authorization to [" + + DelegatedAuthorizationSettings.AUTHZ_REALMS.get(realmSettings) + "]"); + } + } + } + + private Realm findRealm(String name, Iterable allRealms) { + for (Realm realm : allRealms) { + if (name.equals(realm.name())) { + return realm; + } + } + throw new IllegalArgumentException("configured authorization realm [" + name + "] does not exist (or is not enabled)"); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RealmUserLookup.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RealmUserLookup.java new file mode 100644 index 0000000000000..428b7c1e4a1cf --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RealmUserLookup.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.security.authc.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.common.IteratingActionListener; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.Collections; +import java.util.List; + +public class RealmUserLookup { + + private final List realms; + private final ThreadContext threadContext; + + public RealmUserLookup(List realms, ThreadContext threadContext) { + this.realms = realms; + this.threadContext = threadContext; + } + + public List getRealms() { + return Collections.unmodifiableList(realms); + } + + public boolean hasRealms() { + return realms.isEmpty() == false; + } + + /** + * Lookup the {@code principal} in the list of {@link #realms}. + * The realms are consulted in order. When one realm responds with a non-null {@link User}, this + * is returned with the matching realm, through the {@code listener}. + * If no user is found (including the case where the {@link #realms} list is empty), then + * {@link ActionListener#onResponse(Object)} is called with a {@code null} {@link Tuple}. + */ + public void lookup(String principal, ActionListener> listener) { + final IteratingActionListener, ? 
extends Realm> userLookupListener = + new IteratingActionListener<>(listener, + (realm, lookupUserListener) -> realm.lookupUser(principal, + ActionListener.wrap(foundUser -> { + if (foundUser != null) { + lookupUserListener.onResponse(new Tuple<>(foundUser, realm)); + } else { + lookupUserListener.onResponse(null); + } + }, + lookupUserListener::onFailure)), + realms, threadContext); + try { + userLookupListener.run(); + } catch (Exception e) { + listener.onFailure(e); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java index 2c9bb8ce3c0ce..642bc167f7d4a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -57,7 +57,7 @@ import org.elasticsearch.xpack.core.security.support.Automatons; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackSecurityUser; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.audit.AuditTrailService; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java index 07845a131b750..3068a3993d309 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java @@ -8,7 +8,7 @@ import org.elasticsearch.cluster.metadata.AliasOrIndex; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.xpack.core.security.authz.permission.Role; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.ArrayList; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java index 2247cbe02a812..34aed55bb2903 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java @@ -25,12 +25,11 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; import org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField; -import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -419,7 +418,7 @@ private static class RemoteClusterResolver extends 
RemoteClusterAware { private RemoteClusterResolver(Settings settings, ClusterSettings clusterSettings) { super(settings); - clusters = new CopyOnWriteArraySet<>(buildRemoteClustersSeeds(settings).keySet()); + clusters = new CopyOnWriteArraySet<>(buildRemoteClustersDynamicConfig(settings).keySet()); listenForUpdates(clusterSettings); } @@ -429,7 +428,7 @@ protected Set getRemoteClusterNames() { } @Override - protected void updateRemoteCluster(String clusterAlias, List addresses) { + protected void updateRemoteCluster(String clusterAlias, List addresses, String proxyAddress) { if (addresses.isEmpty()) { clusters.remove(clusterAlias); } else { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessor.java index 6db19d8edeb1b..15ac88b4d9462 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessor.java @@ -10,7 +10,7 @@ import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; import org.elasticsearch.xpack.core.security.authc.Authentication; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.util.Arrays; import java.util.EnumSet; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java index 10b8e65c16824..b280b3a89a204 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegeAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegeAction.java deleted file mode 100644 index 6c3ef8e70fabf..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegeAction.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
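The deleted `RestPutPrivilegeAction` handled `PUT`/`POST /_xpack/security/privilege/{application}/{privilege}` for a single privilege; as the next hunk shows, `RestPutPrivilegesAction` now also registers `PUT` on `/_xpack/security/privilege/`, so all writes go through the one bulk endpoint. The sketch below only illustrates how a caller might address the consolidated endpoint with the low-level REST client used elsewhere in this diff; the helper class is hypothetical and the JSON body is a placeholder whose exact shape is defined by `PutPrivilegesRequestBuilder` and not reproduced here.

```java
// Hypothetical helper, not part of this change.
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.io.IOException;

class PutPrivilegesExample {

    static Response putPrivileges(RestClient client, String privilegesJson) throws IOException {
        // PUT and POST are now both registered on the bulk endpoint; the per-privilege
        // /_xpack/security/privilege/{application}/{privilege} handler has been removed.
        Request request = new Request("PUT", "/_xpack/security/privilege/");
        request.setJsonEntity(privilegesJson);
        return client.performRequest(request);
    }
}
```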
- */ -package org.elasticsearch.xpack.security.rest.action.privilege; - -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesRequestBuilder; -import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; -import org.elasticsearch.xpack.core.security.client.SecurityClient; -import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; - -import java.io.IOException; - -import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestRequest.Method.PUT; - -/** - * Rest endpoint to add one or more {@link ApplicationPrivilege} objects to the security index - */ -public class RestPutPrivilegeAction extends SecurityBaseRestHandler { - - public RestPutPrivilegeAction(Settings settings, RestController controller, XPackLicenseState licenseState) { - super(settings, licenseState); - controller.registerHandler(PUT, "/_xpack/security/privilege/{application}/{privilege}", this); - controller.registerHandler(POST, "/_xpack/security/privilege/{application}/{privilege}", this); - } - - @Override - public String getName() { - return "xpack_security_put_privilege_action"; - } - - @Override - public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { - final String application = request.param("application"); - final String privilege = request.param("privilege"); - PutPrivilegesRequestBuilder requestBuilder = new SecurityClient(client) - .preparePutPrivilege(application, privilege, request.requiredContent(), request.getXContentType()) - .setRefreshPolicy(request.param("refresh")); - - return RestPutPrivilegesAction.execute(requestBuilder); - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegesAction.java index eb1104c9bc036..dc565e3f87339 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegesAction.java @@ -29,6 +29,7 @@ import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestRequest.Method.PUT; /** * Rest endpoint to add one or more {@link ApplicationPrivilege} objects to the security index @@ -37,6 +38,7 @@ public class RestPutPrivilegesAction extends SecurityBaseRestHandler { public RestPutPrivilegesAction(Settings settings, RestController controller, XPackLicenseState licenseState) { super(settings, licenseState); + controller.registerHandler(PUT, "/_xpack/security/privilege/", this); controller.registerHandler(POST, "/_xpack/security/privilege/", this); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java index 7e33844e99bb1..1b64b3ce2baee 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.rest.RestRequestFilter; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java index 3e8cf26ad7d6b..1ab80954e9b53 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; import org.elasticsearch.xpack.core.security.client.SecurityClient; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java index 761af81b08ec5..fcbae00ba090a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java @@ -25,9 +25,8 @@ import org.elasticsearch.transport.nio.NioTcpChannel; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.authc.Authentication; -import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.action.SecurityActionMapper; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authz.AuthorizationService; @@ -116,50 +115,28 @@ requests from all the nodes are attached with a user (either a serialize } } - final Version version = transportChannel.getVersion().equals(Version.V_5_4_0) ? 
Version.CURRENT : transportChannel.getVersion(); + final Version version = transportChannel.getVersion(); authcService.authenticate(securityAction, request, (User)null, ActionListener.wrap((authentication) -> { - if (reservedRealmEnabled && authentication.getVersion().before(Version.V_5_2_0) && - KibanaUser.NAME.equals(authentication.getUser().authenticatedUser().principal())) { - executeAsCurrentVersionKibanaUser(securityAction, request, transportChannel, listener, authentication); - } else if (securityAction.equals(TransportService.HANDSHAKE_ACTION_NAME) && - SystemUser.is(authentication.getUser()) == false) { - securityContext.executeAsUser(SystemUser.INSTANCE, (ctx) -> { - final Authentication replaced = Authentication.getAuthentication(threadContext); - final AuthorizationUtils.AsyncAuthorizer asyncAuthorizer = - new AuthorizationUtils.AsyncAuthorizer(replaced, listener, (userRoles, runAsRoles) -> { - authzService.authorize(replaced, securityAction, request, userRoles, runAsRoles); - listener.onResponse(null); - }); - asyncAuthorizer.authorize(authzService); - }, version); - } else { + if (securityAction.equals(TransportService.HANDSHAKE_ACTION_NAME) && + SystemUser.is(authentication.getUser()) == false) { + securityContext.executeAsUser(SystemUser.INSTANCE, (ctx) -> { + final Authentication replaced = Authentication.getAuthentication(threadContext); final AuthorizationUtils.AsyncAuthorizer asyncAuthorizer = - new AuthorizationUtils.AsyncAuthorizer(authentication, listener, (userRoles, runAsRoles) -> { - authzService.authorize(authentication, securityAction, request, userRoles, runAsRoles); - listener.onResponse(null); - }); + new AuthorizationUtils.AsyncAuthorizer(replaced, listener, (userRoles, runAsRoles) -> { + authzService.authorize(replaced, securityAction, request, userRoles, runAsRoles); + listener.onResponse(null); + }); asyncAuthorizer.authorize(authzService); - } - }, listener::onFailure)); - } - - private void executeAsCurrentVersionKibanaUser(String securityAction, TransportRequest request, TransportChannel transportChannel, - ActionListener listener, Authentication authentication) { - // the authentication came from an older node - so let's replace the user with our version - final User kibanaUser = new KibanaUser(authentication.getUser().enabled()); - if (kibanaUser.enabled()) { - securityContext.executeAsUser(kibanaUser, (original) -> { - final Authentication replacedUserAuth = securityContext.getAuthentication(); + }, version); + } else { final AuthorizationUtils.AsyncAuthorizer asyncAuthorizer = - new AuthorizationUtils.AsyncAuthorizer(replacedUserAuth, listener, (userRoles, runAsRoles) -> { - authzService.authorize(replacedUserAuth, securityAction, request, userRoles, runAsRoles); + new AuthorizationUtils.AsyncAuthorizer(authentication, listener, (userRoles, runAsRoles) -> { + authzService.authorize(authentication, securityAction, request, userRoles, runAsRoles); listener.onResponse(null); }); asyncAuthorizer.authorize(authzService); - }, transportChannel.getVersion()); - } else { - throw new IllegalStateException("a disabled user should never be sent. 
" + kibanaUser); - } + } + }, listener::onFailure)); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java index 246b584a83b01..9317e9f8dcb5a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java @@ -7,10 +7,9 @@ import org.apache.http.HttpEntity; import org.apache.http.StatusLine; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.SecureString; @@ -18,9 +17,7 @@ import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import java.io.IOException; -import java.util.HashMap; import java.util.Locale; -import java.util.Map; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -28,64 +25,59 @@ import static org.hamcrest.Matchers.not; /** - * a helper class that contains a couple of HTTP helper methods + * A helper class that contains a couple of HTTP helper methods. */ public abstract class AbstractPrivilegeTestCase extends SecuritySingleNodeTestCase { - protected void assertAccessIsAllowed(String user, String method, String uri, String body, - Map params) throws IOException { - Response response = getRestClient().performRequest(method, uri, params, entityOrNull(body), - new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, - UsernamePasswordToken.basicAuthHeaderValue(user, new SecureString("passwd".toCharArray())))); + protected void assertAccessIsAllowed(String user, Request request) throws IOException { + setUser(request, user); + Response response = getRestClient().performRequest(request); StatusLine statusLine = response.getStatusLine(); - String message = String.format(Locale.ROOT, "%s %s: Expected no error got %s %s with body %s", method, uri, - statusLine.getStatusCode(), statusLine.getReasonPhrase(), EntityUtils.toString(response.getEntity())); + String message = String.format(Locale.ROOT, "%s %s: Expected no error got %s %s with body %s", + request.getMethod(), request.getEndpoint(), statusLine.getStatusCode(), + statusLine.getReasonPhrase(), EntityUtils.toString(response.getEntity())); assertThat(message, statusLine.getStatusCode(), is(not(greaterThanOrEqualTo(400)))); } protected void assertAccessIsAllowed(String user, String method, String uri, String body) throws IOException { - assertAccessIsAllowed(user, method, uri, body, new HashMap<>()); + Request request = new Request(method, uri); + request.setJsonEntity(body); + assertAccessIsAllowed(user, request); } protected void assertAccessIsAllowed(String user, String method, String uri) throws IOException { - assertAccessIsAllowed(user, method, uri, null, new HashMap<>()); + assertAccessIsAllowed(user, new Request(method, uri)); } - protected void assertAccessIsDenied(String user, String method, String uri, String body) throws IOException { - assertAccessIsDenied(user, method, uri, body, new HashMap<>()); - } - - protected void assertAccessIsDenied(String user, String method, String uri) 
throws IOException { - assertAccessIsDenied(user, method, uri, null, new HashMap<>()); - } - - protected void assertAccessIsDenied(String user, String method, String uri, String body, - Map params) throws IOException { - ResponseException responseException = expectThrows(ResponseException.class, - () -> getRestClient().performRequest(method, uri, params, entityOrNull(body), - new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, - UsernamePasswordToken.basicAuthHeaderValue(user, new SecureString("passwd".toCharArray()))))); + protected void assertAccessIsDenied(String user, Request request) throws IOException { + setUser(request, user); + ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); StatusLine statusLine = responseException.getResponse().getStatusLine(); - String message = String.format(Locale.ROOT, "%s %s body %s: Expected 403, got %s %s with body %s", method, uri, body, + String requestBody = request.getEntity() == null ? "" : "with body " + EntityUtils.toString(request.getEntity()); + String message = String.format(Locale.ROOT, "%s %s body %s: Expected 403, got %s %s with body %s", + request.getMethod(), request.getEndpoint(), requestBody, statusLine.getStatusCode(), statusLine.getReasonPhrase(), EntityUtils.toString(responseException.getResponse().getEntity())); assertThat(message, statusLine.getStatusCode(), is(403)); } + protected void assertAccessIsDenied(String user, String method, String uri, String body) throws IOException { + Request request = new Request(method, uri); + request.setJsonEntity(body); + assertAccessIsDenied(user, request); + } - protected void assertBodyHasAccessIsDenied(String user, String method, String uri, String body) throws IOException { - assertBodyHasAccessIsDenied(user, method, uri, body, new HashMap<>()); + protected void assertAccessIsDenied(String user, String method, String uri) throws IOException { + assertAccessIsDenied(user, new Request(method, uri)); } /** * Like {@code assertAcessIsDenied}, but for _bulk requests since the entire * request will not be failed, just the individual ones */ - protected void assertBodyHasAccessIsDenied(String user, String method, String uri, String body, - Map params) throws IOException { - Response resp = getRestClient().performRequest(method, uri, params, entityOrNull(body), - new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, - UsernamePasswordToken.basicAuthHeaderValue(user, new SecureString("passwd".toCharArray())))); + protected void assertBodyHasAccessIsDenied(String user, Request request) throws IOException { + setUser(request, user); + Response resp = getRestClient().performRequest(request); StatusLine statusLine = resp.getStatusLine(); assertThat(statusLine.getStatusCode(), is(200)); HttpEntity bodyEntity = resp.getEntity(); @@ -93,11 +85,15 @@ protected void assertBodyHasAccessIsDenied(String user, String method, String ur assertThat(bodyStr, containsString("unauthorized for user [" + user + "]")); } - private static HttpEntity entityOrNull(String body) { - HttpEntity entity = null; - if (body != null) { - entity = new StringEntity(body, ContentType.APPLICATION_JSON); - } - return entity; + protected void assertBodyHasAccessIsDenied(String user, String method, String uri, String body) throws IOException { + Request request = new Request(method, uri); + request.setJsonEntity(body); + assertBodyHasAccessIsDenied(user, request); + } + + private void setUser(Request request, String user) { + RequestOptions.Builder options = 
RequestOptions.DEFAULT.toBuilder(); + options.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(user, new SecureString("passwd".toCharArray()))); + request.setOptions(options); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java index 0b6321e596010..fc02a5c4d625e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.client.SecurityClient; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.Realms; import org.junit.BeforeClass; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java index 03d0310a136b3..bf81fd77dc59d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.integration; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.client.Request; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; @@ -15,9 +16,7 @@ import org.junit.BeforeClass; import java.nio.file.Path; -import java.util.Map; -import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.is; @@ -132,10 +131,12 @@ public void testThatSnapshotAndRestore() throws Exception { assertAccessIsDenied("user_c", "PUT", "/_snapshot/my-repo", repoJson); assertAccessIsAllowed("user_a", "PUT", "/_snapshot/my-repo", repoJson); - Map params = singletonMap("refresh", "true"); - assertAccessIsDenied("user_a", "PUT", "/someindex/bar/1", "{ \"name\" : \"elasticsearch\" }", params); - assertAccessIsDenied("user_b", "PUT", "/someindex/bar/1", "{ \"name\" : \"elasticsearch\" }", params); - assertAccessIsAllowed("user_c", "PUT", "/someindex/bar/1", "{ \"name\" : \"elasticsearch\" }", params); + Request createBar = new Request("PUT", "/someindex/bar/1"); + createBar.setJsonEntity("{ \"name\" : \"elasticsearch\" }"); + createBar.addParameter("refresh", "true"); + assertAccessIsDenied("user_a", createBar); + assertAccessIsDenied("user_b", createBar); + assertAccessIsAllowed("user_c", createBar); assertAccessIsDenied("user_b", "PUT", "/_snapshot/my-repo/my-snapshot", "{ \"indices\": \"someindex\" }"); assertAccessIsDenied("user_c", "PUT", "/_snapshot/my-repo/my-snapshot", "{ \"indices\": \"someindex\" }"); @@ -152,10 +153,11 @@ public void testThatSnapshotAndRestore() throws Exception { assertAccessIsDenied("user_b", "DELETE", "/someindex"); assertAccessIsAllowed("user_c", "DELETE", "/someindex"); - params = singletonMap("wait_for_completion", "true"); - assertAccessIsDenied("user_b", "POST", 
"/_snapshot/my-repo/my-snapshot/_restore", null, params); - assertAccessIsDenied("user_c", "POST", "/_snapshot/my-repo/my-snapshot/_restore", null, params); - assertAccessIsAllowed("user_a", "POST", "/_snapshot/my-repo/my-snapshot/_restore", null, params); + Request restoreSnapshotRequest = new Request("POST", "/_snapshot/my-repo/my-snapshot/_restore"); + restoreSnapshotRequest.addParameter("wait_for_completion", "true"); + assertAccessIsDenied("user_b", restoreSnapshotRequest); + assertAccessIsDenied("user_c", restoreSnapshotRequest); + assertAccessIsAllowed("user_a", restoreSnapshotRequest); assertAccessIsDenied("user_a", "GET", "/someindex/bar/1"); assertAccessIsDenied("user_b", "GET", "/someindex/bar/1"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java index 57262822982fa..ed82808af7618 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java @@ -13,11 +13,8 @@ import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.junit.Before; -import java.util.Collections; import java.util.Locale; -import java.util.Map; -import static java.util.Collections.singletonMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; import static org.hamcrest.Matchers.is; @@ -143,11 +140,12 @@ protected String configUsersRoles() { @Before public void insertBaseDocumentsAsAdmin() throws Exception { // indices: a,b,c,abc - Map params = singletonMap("refresh", "true"); - assertAccessIsAllowed("admin", "PUT", "/a/foo/1", jsonDoc, params); - assertAccessIsAllowed("admin", "PUT", "/b/foo/1", jsonDoc, params); - assertAccessIsAllowed("admin", "PUT", "/c/foo/1", jsonDoc, params); - assertAccessIsAllowed("admin", "PUT", "/abc/foo/1", jsonDoc, params); + for (String index : new String[] {"a", "b", "c", "abc"}) { + Request request = new Request("PUT", "/" + index + "/foo/1"); + request.setJsonEntity(jsonDoc); + request.addParameter("refresh", "true"); + assertAccessIsAllowed("admin", request); + } } private static String randomIndex() { @@ -402,8 +400,6 @@ public void testThatUnknownUserIsRejectedProperly() throws Exception { } private void assertUserExecutes(String user, String action, String index, boolean userIsAllowed) throws Exception { - Map refreshParams = Collections.emptyMap();//singletonMap("refresh", "true"); - switch (action) { case "all" : if (userIsAllowed) { @@ -438,7 +434,7 @@ private void assertUserExecutes(String user, String action, String index, boolea assertAccessIsAllowed(user, "POST", "/" + index + "/_open"); assertAccessIsAllowed(user, "POST", "/" + index + "/_cache/clear"); // indexing a document to have the mapping available, and wait for green state to make sure index is created - assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/1", jsonDoc, refreshParams); + assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/1", jsonDoc); assertNoTimeout(client().admin().cluster().prepareHealth(index).setWaitForGreenStatus().get()); assertAccessIsAllowed(user, "GET", "/" + index + "/_mapping/foo/field/name"); assertAccessIsAllowed(user, "GET", "/" + index + "/_settings"); @@ -535,8 +531,8 @@ private void assertUserExecutes(String user, String action, String index, boolea case "delete" : String jsonDoc = "{ \"name\" : 
\"docToDelete\"}"; - assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/docToDelete", jsonDoc, refreshParams); - assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/docToDelete2", jsonDoc, refreshParams); + assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/docToDelete", jsonDoc); + assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/docToDelete2", jsonDoc); if (userIsAllowed) { assertAccessIsAllowed(user, "DELETE", "/" + index + "/foo/docToDelete"); } else { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java index af5b73d889dcf..63a38b12a9e17 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; @@ -88,7 +89,7 @@ public void setupReservedPasswords(RestClient restClient) throws IOException { RequestOptions.Builder optionsBuilder = RequestOptions.DEFAULT.toBuilder(); optionsBuilder.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue(ElasticUser.NAME, reservedPassword)); RequestOptions options = optionsBuilder.build(); - for (String username : Arrays.asList(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME)) { + for (String username : Arrays.asList(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME)) { Request request = new Request("PUT", "/_xpack/security/user/" + username + "/_password"); request.setJsonEntity("{\"password\": \"" + new String(reservedPassword.getChars()) + "\"}"); request.setOptions(options); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index 9bb0e44eb664c..7143182c1621a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -7,6 +7,7 @@ import io.netty.util.ThreadDeathWatcher; import io.netty.util.concurrent.GlobalEventExecutor; + import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; @@ -44,7 +45,6 @@ import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.LocalStateSecurity; - import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.AfterClass; import org.junit.Before; @@ -420,14 +420,18 @@ protected void createIndicesWithRandomAliases(String... 
indices) { createIndex(indices); if (frequently()) { + boolean aliasAdded = false; IndicesAliasesRequestBuilder builder = client().admin().indices().prepareAliases(); for (String index : indices) { if (frequently()) { //one alias per index with prefix "alias-" builder.addAlias(index, "alias-" + index); + aliasAdded = true; } } - if (randomBoolean()) { + // If we get to this point and we haven't added an alias to the request we need to add one + // or the request will fail so use noAliasAdded to force adding the alias in this case + if (aliasAdded == false || randomBoolean()) { //one alias pointing to all indices for (String index : indices) { builder.addAlias(index, "alias"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilderTests.java index db0548c03ef30..2ece398d3d19f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilderTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilderTests.java @@ -52,36 +52,6 @@ private ApplicationPrivilegeDescriptor descriptor(String app, String name, Strin return new ApplicationPrivilegeDescriptor(app, name, Sets.newHashSet(actions), Collections.emptyMap()); } - public void testBuildRequestFromJsonObject() throws Exception { - final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null, PutPrivilegesAction.INSTANCE); - builder.source("foo", "read", new BytesArray( - "{ \"application\":\"foo\", \"name\":\"read\", \"actions\":[ \"data:/read/*\", \"admin:/read/*\" ] }" - ), XContentType.JSON); - final List privileges = builder.request().getPrivileges(); - assertThat(privileges, iterableWithSize(1)); - assertThat(privileges, contains(descriptor("foo", "read", "data:/read/*", "admin:/read/*"))); - } - - public void testPrivilegeNameValidationOfSingleElement() throws Exception { - final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null, PutPrivilegesAction.INSTANCE); - final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> - builder.source("foo", "write", new BytesArray( - "{ \"application\":\"foo\", \"name\":\"read\", \"actions\":[ \"data:/read/*\", \"admin:/read/*\" ] }" - ), XContentType.JSON)); - assertThat(exception.getMessage(), containsString("write")); - assertThat(exception.getMessage(), containsString("read")); - } - - public void testApplicationNameValidationOfSingleElement() throws Exception { - final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null, PutPrivilegesAction.INSTANCE); - final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> - builder.source("bar", "read", new BytesArray( - "{ \"application\":\"foo\", \"name\":\"read\", \"actions\":[ \"data:/read/*\", \"admin:/read/*\" ] }" - ), XContentType.JSON)); - assertThat(exception.getMessage(), containsString("foo")); - assertThat(exception.getMessage(), containsString("bar")); - } - public void testPrivilegeNameValidationOfMultipleElement() throws Exception { final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null, PutPrivilegesAction.INSTANCE); final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java index 1d4da71e11b5e..b659adf22cfc3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java @@ -14,7 +14,7 @@ public class FIPS140JKSKeystoreBootstrapCheckTests extends ESTestCase { public void testNoKeystoreIsAllowed() { final Settings.Builder settings = Settings.builder() .put("xpack.security.fips_mode.enabled", "true"); - assertFalse(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertFalse(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testSSLKeystoreTypeIsNotAllowed() { @@ -22,7 +22,7 @@ public void testSSLKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.ssl.keystore.path", "/this/is/the/path") .put("xpack.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testSSLImplicitKeystoreTypeIsNotAllowed() { @@ -30,7 +30,7 @@ public void testSSLImplicitKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.ssl.keystore.path", "/this/is/the/path") .put("xpack.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testTransportSSLKeystoreTypeIsNotAllowed() { @@ -38,7 +38,7 @@ public void testTransportSSLKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.security.transport.ssl.keystore.path", "/this/is/the/path") .put("xpack.security.transport.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testHttpSSLKeystoreTypeIsNotAllowed() { @@ -46,7 +46,7 @@ public void testHttpSSLKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.security.http.ssl.keystore.path", "/this/is/the/path") .put("xpack.security.http.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testRealmKeystoreTypeIsNotAllowed() { @@ -54,13 +54,13 @@ public void testRealmKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.security.authc.realms.ldap.ssl.keystore.path", "/this/is/the/path") .put("xpack.security.authc.realms.ldap.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new 
BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testImplicitRealmKeystoreTypeIsNotAllowed() { final Settings.Builder settings = Settings.builder() .put("xpack.security.fips_mode.enabled", "true") .put("xpack.security.authc.realms.ldap.ssl.keystore.path", "/this/is/the/path"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java index a2ec8f9fb2053..fb4c9e21a258f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java @@ -17,27 +17,29 @@ public class FIPS140LicenseBootstrapCheckTests extends ESTestCase { public void testBootstrapCheck() throws Exception { - assertTrue(new FIPS140LicenseBootstrapCheck(false) - .check(new BootstrapContext(Settings.EMPTY, MetaData.EMPTY_META_DATA)).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck(randomBoolean()) + assertTrue(new FIPS140LicenseBootstrapCheck() .check(new BootstrapContext(Settings.EMPTY, MetaData.EMPTY_META_DATA)).isSuccess()); + assertTrue(new FIPS140LicenseBootstrapCheck() + .check(new BootstrapContext(Settings.builder().put("xpack.security.fips_mode.enabled", randomBoolean()).build(), MetaData + .EMPTY_META_DATA)).isSuccess()); - License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); MetaData.Builder builder = MetaData.builder(); + License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); TestUtils.putLicense(builder, license); MetaData metaData = builder.build(); + if (FIPS140LicenseBootstrapCheck.ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode())) { - assertTrue(new FIPS140LicenseBootstrapCheck(true).check(new BootstrapContext( + assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck(false).check(new BootstrapContext( + assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", false).build(), metaData)).isSuccess()); } else { - assertTrue(new FIPS140LicenseBootstrapCheck(false).check(new BootstrapContext( + assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", false).build(), metaData)).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck(true).check(new BootstrapContext( + assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).isFailure()); assertEquals("FIPS mode is only allowed with a Platinum or Trial license", - new FIPS140LicenseBootstrapCheck(true).check(new BootstrapContext( + new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( 
Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).getMessage()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java index 8632400866a07..6376ca211dcae 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java @@ -25,7 +25,7 @@ public void testPBKDF2AlgorithmIsAllowed() { .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), "PBKDF2_10000") .build(); final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings).check(new BootstrapContext(settings, null)); + new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(new BootstrapContext(settings, null)); assertFalse(result.isFailure()); } @@ -35,7 +35,7 @@ public void testPBKDF2AlgorithmIsAllowed() { .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), "PBKDF2") .build(); final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings).check(new BootstrapContext(settings, null)); + new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(new BootstrapContext(settings, null)); assertFalse(result.isFailure()); } } @@ -55,7 +55,7 @@ private void runBCRYPTTest(final boolean fipsModeEnabled, final String passwordH } final Settings settings = builder.build(); final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings).check(new BootstrapContext(settings, null)); + new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(new BootstrapContext(settings, null)); assertThat(result.isFailure(), equalTo(fipsModeEnabled)); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java index 2b9e540f3bfe4..e3b1cd31246a2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.junit.Before; import java.io.IOException; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java index 1ac5490dc0c6a..577c7ddb2496c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java @@ -33,7 +33,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationField; import org.elasticsearch.xpack.core.security.authz.permission.Role; import 
org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authz.AuthorizationService; import org.junit.Before; @@ -79,7 +79,7 @@ public void init() throws Exception { ClusterState state = mock(ClusterState.class); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("id1", buildNewFakeTransportAddress(), Version.CURRENT)) - .add(new DiscoveryNode("id2", buildNewFakeTransportAddress(), Version.V_5_4_0)) + .add(new DiscoveryNode("id2", buildNewFakeTransportAddress(), Version.V_6_0_0)) .build(); when(state.nodes()).thenReturn(nodes); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java index 0809276932daa..7c951c0014e89 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.audit.AuditTrailService; import java.util.Collections; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java index f939b175e48b1..f1363214b0706 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.audit.AuditTrailService; import java.util.Collections; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index 3371b901647c0..17a45f2389303 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -56,7 +56,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import 
org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; import org.elasticsearch.xpack.security.authc.UserToken; @@ -316,7 +316,7 @@ private Tuple storeToken(SamlNameId nameId, String session) t new RealmRef("native", NativeRealmSettings.TYPE, "node01"), null); final Map metadata = samlRealm.createTokenMetadata(nameId, session); final PlainActionFuture> future = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, future, metadata); + tokenService.createUserToken(authentication, authentication, future, metadata, true); return future.actionGet(); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java index 1ce8b1aff1395..291c102f396c5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java @@ -46,7 +46,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; @@ -222,7 +222,7 @@ public void testLogoutInvalidatesToken() throws Exception { new SamlNameId(NameID.TRANSIENT, nameId, null, null, null), session); final PlainActionFuture> future = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, future, tokenMetaData); + tokenService.createUserToken(authentication, authentication, future, tokenMetaData, true); final UserToken userToken = future.actionGet().v1(); mockGetTokenFromId(userToken, client); final String tokenString = tokenService.getUserTokenString(userToken); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java new file mode 100644 index 0000000000000..a6b92d79f1589 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.security.action.token; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.get.GetRequestBuilder; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetAction; +import org.elasticsearch.action.get.MultiGetItemResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetRequestBuilder; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.action.update.UpdateRequestBuilder; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenAction; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequest; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.authc.TokenService; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; +import org.junit.After; +import org.junit.Before; + +import java.time.Clock; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportCreateTokenActionTests extends ESTestCase { + + private static final Settings SETTINGS = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "TokenServiceTests") + .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).build(); + + private ThreadPool threadPool; + private Client client; + private SecurityIndexManager securityIndex; + private ClusterService clusterService; + private AtomicReference idxReqReference; + private AuthenticationService authenticationService; + + @Before + public void setupClient() { + threadPool = new TestThreadPool(getTestName()); + client = mock(Client.class); + idxReqReference = new AtomicReference<>(); + authenticationService = mock(AuthenticationService.class); + when(client.threadPool()).thenReturn(threadPool); + when(client.settings()).thenReturn(SETTINGS); + doAnswer(invocationOnMock -> { + 
GetRequestBuilder builder = new GetRequestBuilder(client, GetAction.INSTANCE); + builder.setIndex((String) invocationOnMock.getArguments()[0]) + .setType((String) invocationOnMock.getArguments()[1]) + .setId((String) invocationOnMock.getArguments()[2]); + return builder; + }).when(client).prepareGet(anyString(), anyString(), anyString()); + when(client.prepareMultiGet()).thenReturn(new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE)); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + MultiGetResponse response = mock(MultiGetResponse.class); + MultiGetItemResponse[] responses = new MultiGetItemResponse[2]; + when(response.getResponses()).thenReturn(responses); + + GetResponse oldGetResponse = mock(GetResponse.class); + when(oldGetResponse.isExists()).thenReturn(false); + responses[0] = new MultiGetItemResponse(oldGetResponse, null); + + GetResponse getResponse = mock(GetResponse.class); + responses[1] = new MultiGetItemResponse(getResponse, null); + when(getResponse.isExists()).thenReturn(false); + listener.onResponse(response); + return Void.TYPE; + }).when(client).multiGet(any(MultiGetRequest.class), any(ActionListener.class)); + when(client.prepareIndex(any(String.class), any(String.class), any(String.class))) + .thenReturn(new IndexRequestBuilder(client, IndexAction.INSTANCE)); + when(client.prepareUpdate(any(String.class), any(String.class), any(String.class))) + .thenReturn(new UpdateRequestBuilder(client, UpdateAction.INSTANCE)); + doAnswer(invocationOnMock -> { + idxReqReference.set((IndexRequest) invocationOnMock.getArguments()[1]); + ActionListener responseActionListener = (ActionListener) invocationOnMock.getArguments()[2]; + responseActionListener.onResponse(new IndexResponse()); + return null; + }).when(client).execute(eq(IndexAction.INSTANCE), any(IndexRequest.class), any(ActionListener.class)); + + // setup lifecycle service + securityIndex = mock(SecurityIndexManager.class); + doAnswer(invocationOnMock -> { + Runnable runnable = (Runnable) invocationOnMock.getArguments()[1]; + runnable.run(); + return null; + }).when(securityIndex).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class)); + + doAnswer(invocationOnMock -> { + UsernamePasswordToken token = (UsernamePasswordToken) invocationOnMock.getArguments()[2]; + User user = new User(token.principal()); + Authentication authentication = new Authentication(user, new Authentication.RealmRef("fake", "mock", "n1"), null); + authentication.writeToContext(threadPool.getThreadContext()); + ActionListener authListener = (ActionListener) invocationOnMock.getArguments()[3]; + authListener.onResponse(authentication); + return Void.TYPE; + }).when(authenticationService).authenticate(eq(CreateTokenAction.NAME), any(CreateTokenRequest.class), + any(UsernamePasswordToken.class), any(ActionListener.class)); + + this.clusterService = ClusterServiceUtils.createClusterService(threadPool); + } + + @After + public void stopThreadPool() throws Exception { + if (threadPool != null) { + terminate(threadPool); + } + } + + public void testClientCredentialsCreatesWithoutRefreshToken() throws Exception { + final TokenService tokenService = new TokenService(SETTINGS, Clock.systemUTC(), client, securityIndex, clusterService); + Authentication authentication = new Authentication(new User("joe"), new Authentication.RealmRef("realm", "type", "node"), null); + authentication.writeToContext(threadPool.getThreadContext()); + + final TransportCreateTokenAction action = 
new TransportCreateTokenAction(SETTINGS, threadPool, + mock(TransportService.class), new ActionFilters(Collections.emptySet()), tokenService, + authenticationService); + final CreateTokenRequest createTokenRequest = new CreateTokenRequest(); + createTokenRequest.setGrantType("client_credentials"); + + PlainActionFuture tokenResponseFuture = new PlainActionFuture<>(); + action.doExecute(null, createTokenRequest, tokenResponseFuture); + CreateTokenResponse createTokenResponse = tokenResponseFuture.get(); + assertNull(createTokenResponse.getRefreshToken()); + assertNotNull(createTokenResponse.getTokenString()); + + assertNotNull(idxReqReference.get()); + Map sourceMap = idxReqReference.get().sourceAsMap(); + assertNotNull(sourceMap); + assertNotNull(sourceMap.get("access_token")); + assertNull(sourceMap.get("refresh_token")); + } + + public void testPasswordGrantTypeCreatesWithRefreshToken() throws Exception { + final TokenService tokenService = new TokenService(SETTINGS, Clock.systemUTC(), client, securityIndex, clusterService); + Authentication authentication = new Authentication(new User("joe"), new Authentication.RealmRef("realm", "type", "node"), null); + authentication.writeToContext(threadPool.getThreadContext()); + + final TransportCreateTokenAction action = new TransportCreateTokenAction(SETTINGS, threadPool, + mock(TransportService.class), new ActionFilters(Collections.emptySet()), tokenService, + authenticationService); + final CreateTokenRequest createTokenRequest = new CreateTokenRequest(); + createTokenRequest.setGrantType("password"); + createTokenRequest.setUsername("user"); + createTokenRequest.setPassword(new SecureString("password".toCharArray())); + + PlainActionFuture tokenResponseFuture = new PlainActionFuture<>(); + action.doExecute(null, createTokenRequest, tokenResponseFuture); + CreateTokenResponse createTokenResponse = tokenResponseFuture.get(); + assertNotNull(createTokenResponse.getRefreshToken()); + assertNotNull(createTokenResponse.getTokenString()); + + assertNotNull(idxReqReference.get()); + Map sourceMap = idxReqReference.get().sourceAsMap(); + assertNotNull(sourceMap); + assertNotNull(sourceMap.get("access_token")); + assertNotNull(sourceMap.get("refresh_token")); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java index 7862097d0006f..a8e2464805825 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import java.util.Collections; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java index 410c164ffe741..aabaa40381f69 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; import org.mockito.invocation.InvocationOnMock; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java index 0c1ddbd9ba754..4e6e0b3551bc2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; import org.mockito.invocation.InvocationOnMock; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java index ebdb145559192..1c5f93187c059 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java index d7795d3ab9188..a2e283e1b36ff 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java @@ -34,7 +34,7 @@ import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; import 
org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authz.AuthorizationService; import org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore; import org.hamcrest.Matchers; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java index 36ba3f46b5e9e..b6037932f8a8f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java index 4ca7ab97f73d5..d811b6359b186 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; import org.mockito.invocation.InvocationOnMock; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java index ba9a67eb48f74..b346fc6857e7e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.transport.filter.IPFilter; import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; import org.junit.Before; diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java index 923c918f011d8..9bc5c989d1f9c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java @@ -26,7 +26,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail.State; import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; import org.junit.After; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java index dcb8d8b7569ce..cb1b69708bdf2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java @@ -55,7 +55,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.LocalStateSecurity; import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail.Message; import org.elasticsearch.xpack.security.support.SecurityIndexManager; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java index a3a9d05704f0d..4c9df8fd9d382 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail.AuditEventMetaInfo; import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrailTests.MockMessage; import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrailTests.RestContent; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java index c8e14e668c911..1059e22abd663 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java @@ -35,7 +35,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.rest.RemoteHostHeader; import org.elasticsearch.xpack.security.transport.filter.IPFilter; import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 4a40e0d543bcf..1640ab727fe31 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -64,7 +64,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.audit.AuditTrailService; import org.elasticsearch.xpack.security.authc.AuthenticationService.Authenticator; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; @@ -896,7 +896,7 @@ public void testAuthenticateWithToken() throws Exception { PlainActionFuture> tokenFuture = new PlainActionFuture<>(); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { Authentication originatingAuth = new Authentication(new User("creator"), new RealmRef("test", "test", "test"), null); - tokenService.createUserToken(expected, originatingAuth, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(expected, originatingAuth, tokenFuture, Collections.emptyMap(), true); } String token = tokenService.getUserTokenString(tokenFuture.get().v1()); mockGetTokenFromId(tokenFuture.get().v1(), client); @@ -975,7 +975,7 @@ public void testExpiredToken() throws Exception { PlainActionFuture> tokenFuture = new PlainActionFuture<>(); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { Authentication originatingAuth = new Authentication(new User("creator"), new RealmRef("test", "test", "test"), null); - tokenService.createUserToken(expected, originatingAuth, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(expected, originatingAuth, tokenFuture, Collections.emptyMap(), true); } String token = tokenService.getUserTokenString(tokenFuture.get().v1()); mockGetTokenFromId(tokenFuture.get().v1(), client); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java index 1da7d68c91ca3..9d795826298ab 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; import org.junit.Before; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java index ec4a97b7f392b..e6cc2dcccdfaa 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java @@ -341,6 +341,39 @@ public void testCreateThenRefreshAsDifferentUser() { assertEquals(SecuritySettingsSource.TEST_USER_NAME, response.user().principal()); } + public void testClientCredentialsGrant() throws Exception { + Client client = client().filterWithHeader(Collections.singletonMap("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + SecurityClient securityClient = new SecurityClient(client); + CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() + .setGrantType("client_credentials") + .get(); + assertNull(createTokenResponse.getRefreshToken()); + + AuthenticateRequest request = new AuthenticateRequest(); + request.username(SecuritySettingsSource.TEST_SUPERUSER); + PlainActionFuture authFuture = new PlainActionFuture<>(); + client.filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + createTokenResponse.getTokenString())) + .execute(AuthenticateAction.INSTANCE, request, authFuture); + AuthenticateResponse response = authFuture.get(); + assertEquals(SecuritySettingsSource.TEST_SUPERUSER, response.user().principal()); + + // invalidate + PlainActionFuture invalidateResponseFuture = new PlainActionFuture<>(); + InvalidateTokenRequest invalidateTokenRequest = + new InvalidateTokenRequest(createTokenResponse.getTokenString(), InvalidateTokenRequest.Type.ACCESS_TOKEN); + securityClient.invalidateToken(invalidateTokenRequest, invalidateResponseFuture); + assertTrue(invalidateResponseFuture.get().isCreated()); + + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { + PlainActionFuture responseFuture = new PlainActionFuture<>(); + client.filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + createTokenResponse.getTokenString())) + .execute(AuthenticateAction.INSTANCE, request, responseFuture); + responseFuture.actionGet(); + }); + } + @Before public void waitForSecurityIndexWritable() throws Exception { assertSecurityIndexActive(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index c529ea8747b1e..b92b4cad39a26 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -48,7 +48,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authc.TokenMetaData; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.AfterClass; @@ -157,7 +157,7 @@ public void testAttachAndGetToken() throws Exception { TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); assertNotNull(token); mockGetTokenFromId(token); @@ -203,7 +203,7 @@ public void testRotateKey() throws Exception { TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); assertNotNull(token); mockGetTokenFromId(token); @@ -227,7 +227,7 @@ public void testRotateKey() throws Exception { } PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, newTokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, newTokenFuture, Collections.emptyMap(), true); final UserToken newToken = newTokenFuture.get().v1(); assertNotNull(newToken); assertNotEquals(tokenService.getUserTokenString(newToken), tokenService.getUserTokenString(token)); @@ -262,7 +262,7 @@ public void testKeyExchange() throws Exception { otherTokenService.refreshMetaData(tokenService.getTokenMetaData()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); assertNotNull(token); mockGetTokenFromId(token); @@ -292,7 +292,7 @@ public void testPruneKeys() throws Exception { TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - 
tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); assertNotNull(token); mockGetTokenFromId(token); @@ -322,7 +322,7 @@ public void testPruneKeys() throws Exception { } PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, newTokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, newTokenFuture, Collections.emptyMap(), true); final UserToken newToken = newTokenFuture.get().v1(); assertNotNull(newToken); assertNotEquals(tokenService.getUserTokenString(newToken), tokenService.getUserTokenString(token)); @@ -353,7 +353,7 @@ public void testPassphraseWorks() throws Exception { TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); assertNotNull(token); mockGetTokenFromId(token); @@ -383,7 +383,7 @@ public void testGetTokenWhenKeyCacheHasExpired() throws Exception { Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); UserToken token = tokenFuture.get().v1(); assertThat(tokenService.getUserTokenString(token), notNullValue()); @@ -397,7 +397,7 @@ public void testInvalidatedToken() throws Exception { new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); assertNotNull(token); doAnswer(invocationOnMock -> { @@ -451,7 +451,7 @@ public void testTokenExpiry() throws Exception { TokenService tokenService = new TokenService(tokenServiceEnabledSettings, clock, client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); mockGetTokenFromId(token); @@ -501,7 +501,8 @@ public void testTokenServiceDisabled() throws Exception { 
.put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), false) .build(), Clock.systemUTC(), client, securityIndex, clusterService); - IllegalStateException e = expectThrows(IllegalStateException.class, () -> tokenService.createUserToken(null, null, null, null)); + IllegalStateException e = expectThrows(IllegalStateException.class, + () -> tokenService.createUserToken(null, null, null, null, true)); assertEquals("tokens are not enabled", e.getMessage()); PlainActionFuture future = new PlainActionFuture<>(); @@ -559,7 +560,7 @@ public void testIndexNotAvailable() throws Exception { new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); final UserToken token = tokenFuture.get().v1(); assertNotNull(token); mockGetTokenFromId(token); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/UserTokenTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/UserTokenTests.java index c79d77718ca44..1a8f8dc3b5d2d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/UserTokenTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/UserTokenTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; import java.time.Clock; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java index 212ee7ea499ec..6d75cf093714a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.test.NativeRealmIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.BeforeClass; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index 6ebf6dca2cf1e..c2a7ea495a18b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -44,7 +44,7 @@ import 
org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; import org.junit.Before; import org.junit.BeforeClass; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java index c7a7c4f07bb6c..f280e85f4ab1a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java @@ -26,11 +26,12 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.Before; @@ -81,7 +82,8 @@ void doExecute(Action action, Request request, ActionListener future = new PlainActionFuture<>(); nativeUsersStore.setEnabled(user, true, WriteRequest.RefreshPolicy.IMMEDIATE, future); @@ -99,7 +101,8 @@ public void testPasswordUpsertWhenSetEnabledOnReservedUser() throws Exception { public void testBlankPasswordInIndexImpliesDefaultPassword() throws Exception { final NativeUsersStore nativeUsersStore = startNativeUsersStore(); - final String user = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + final String user = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, + BeatsSystemUser.NAME, APMSystemUser.NAME); final Map values = new HashMap<>(); values.put(ENABLED_FIELD, Boolean.TRUE); values.put(PASSWORD_FIELD, BLANK_PASSWORD); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java index 1824597a6adc6..8f7116dd9718c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.xpack.core.security.action.user.ChangePasswordResponse; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; @@ -20,6 +21,7 @@ import 
org.junit.BeforeClass; import java.util.Arrays; +import java.util.List; import static java.util.Collections.singletonMap; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; @@ -49,7 +51,9 @@ public Settings nodeSettings(int nodeOrdinal) { } public void testAuthenticate() { - for (String username : Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME)) { + final List usernames = Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, + BeatsSystemUser.NAME, APMSystemUser.NAME); + for (String username : usernames) { ClusterHealthResponse response = client() .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword()))) .admin() @@ -67,7 +71,9 @@ public void testAuthenticate() { */ public void testAuthenticateAfterEnablingUser() { final SecurityClient c = securityClient(); - for (String username : Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME)) { + final List usernames = Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, + BeatsSystemUser.NAME, APMSystemUser.NAME); + for (String username : usernames) { c.prepareSetEnabled(username, true).get(); ClusterHealthResponse response = client() .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword()))) @@ -81,7 +87,8 @@ public void testAuthenticateAfterEnablingUser() { } public void testChangingPassword() { - String username = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + String username = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, + BeatsSystemUser.NAME, APMSystemUser.NAME); final char[] newPassword = "supersecretvalue".toCharArray(); if (randomBoolean()) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java index 04e0afcf88293..36d1690b8b202 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java @@ -21,12 +21,13 @@ import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.UsernamesField; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -262,7 +263,8 @@ public void testGetUsers() { PlainActionFuture> userFuture = new PlainActionFuture<>(); reservedRealm.users(userFuture); assertThat(userFuture.actionGet(), - 
containsInAnyOrder(new ElasticUser(true), new KibanaUser(true), new LogstashSystemUser(true), new BeatsSystemUser(true))); + containsInAnyOrder(new ElasticUser(true), new KibanaUser(true), new LogstashSystemUser(true), + new BeatsSystemUser(true), new APMSystemUser((true)))); } public void testGetUsersDisabled() { @@ -394,7 +396,7 @@ public void testNonElasticUsersCannotUseBootstrapPasswordWhenSecurityIndexExists new AnonymousUser(Settings.EMPTY), securityIndex, threadPool); PlainActionFuture listener = new PlainActionFuture<>(); - final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME); doAnswer((i) -> { ActionListener callback = (ActionListener) i.getArguments()[1]; callback.onResponse(null); @@ -416,14 +418,15 @@ public void testNonElasticUsersCannotUseBootstrapPasswordWhenSecurityIndexDoesNo new AnonymousUser(Settings.EMPTY), securityIndex, threadPool); PlainActionFuture listener = new PlainActionFuture<>(); - final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME, APMSystemUser.NAME); reservedRealm.doAuthenticate(new UsernamePasswordToken(principal, mockSecureSettings.getString("bootstrap.password")), listener); final AuthenticationResult result = listener.get(); assertThat(result.getStatus(), is(AuthenticationResult.Status.TERMINATE)); } private User randomReservedUser(boolean enabled) { - return randomFrom(new ElasticUser(enabled), new KibanaUser(enabled), new LogstashSystemUser(enabled), new BeatsSystemUser(enabled)); + return randomFrom(new ElasticUser(enabled), new KibanaUser(enabled), new LogstashSystemUser(enabled), + new BeatsSystemUser(enabled), new APMSystemUser(enabled)); } /* @@ -444,23 +447,19 @@ public static void mockGetAllReservedUserInfo(NativeUsersStore usersStore, Map versionPredicate) { - assertThat(versionPredicate.test(Version.V_5_0_0_rc1), is(false)); switch (principal) { case LogstashSystemUser.NAME: - assertThat(versionPredicate.test(Version.V_5_0_0), is(false)); - assertThat(versionPredicate.test(Version.V_5_1_1), is(false)); - assertThat(versionPredicate.test(Version.V_5_2_0), is(true)); assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); break; case BeatsSystemUser.NAME: - assertThat(versionPredicate.test(Version.V_5_6_9), is(false)); assertThat(versionPredicate.test(Version.V_6_2_3), is(false)); assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); break; + case APMSystemUser.NAME: + assertThat(versionPredicate.test(Version.V_6_4_0), is(false)); + assertThat(versionPredicate.test(Version.V_6_5_0), is(true)); + break; default: - assertThat(versionPredicate.test(Version.V_5_0_0), is(true)); - assertThat(versionPredicate.test(Version.V_5_1_1), is(true)); - assertThat(versionPredicate.test(Version.V_5_2_0), is(true)); assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); break; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java index aec2a4eb8208f..f5dad8b7c684c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.junit.Before; import org.mockito.stubbing.Answer; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java index ee89c2efe4f1e..739952af63ee7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.Hasher; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.junit.After; import org.junit.Before; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java index 11aefb758fb29..7c5904d048a63 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java @@ -11,13 +11,20 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; +import org.elasticsearch.xpack.core.security.user.User; import org.ietf.jgss.GSSException; +import java.nio.charset.StandardCharsets; import java.nio.file.Path; +import java.util.Collections; import java.util.List; import javax.security.auth.login.LoginException; @@ -29,7 +36,9 @@ import static org.mockito.AdditionalMatchers.aryEq; import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; public class KerberosRealmAuthenticateFailedTests extends KerberosRealmTestCase { @@ -105,4 +114,30 @@ public void testAuthenticateDifferentFailureScenarios() throws LoginException, G any(ActionListener.class)); } } + + public void 
testDelegatedAuthorizationFailedToResolve() throws Exception { + final String username = randomPrincipalName(); + final MockLookupRealm otherRealm = new MockLookupRealm(new RealmConfig("other_realm", Settings.EMPTY, globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))); + final User lookupUser = new User(randomAlphaOfLength(5)); + otherRealm.registerUser(lookupUser); + + settings = Settings.builder().put(settings).putList("authorization_realms", "other_realm").build(); + final KerberosRealm kerberosRealm = createKerberosRealm(Collections.singletonList(otherRealm), username); + final byte[] decodedTicket = "base64encodedticket".getBytes(StandardCharsets.UTF_8); + final Path keytabPath = config.env().configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(config.settings())); + final boolean krbDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings()); + mockKerberosTicketValidator(decodedTicket, keytabPath, krbDebug, new Tuple<>(username, "out-token"), null); + final KerberosAuthenticationToken kerberosAuthenticationToken = new KerberosAuthenticationToken(decodedTicket); + + final PlainActionFuture future = new PlainActionFuture<>(); + kerberosRealm.authenticate(kerberosAuthenticationToken, future); + + AuthenticationResult result = future.actionGet(); + assertThat(result.getStatus(), is(equalTo(AuthenticationResult.Status.CONTINUE))); + verify(mockKerberosTicketValidator, times(1)).validateTicket(aryEq(decodedTicket), eq(keytabPath), eq(krbDebug), + any(ActionListener.class)); + verify(mockNativeRoleMappingStore).refreshRealmOnChange(kerberosRealm); + verifyNoMoreInteractions(mockKerberosTicketValidator, mockNativeRoleMappingStore); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java index 7ce8ee39c038d..69ebe15c5d74b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java @@ -11,7 +11,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData; import org.ietf.jgss.GSSException; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java index 69b246cd7ca6c..dd83da49a0bb7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java @@ -13,15 +13,17 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; import 
org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.core.security.support.Exceptions; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -30,6 +32,7 @@ import java.nio.file.Path; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; @@ -58,6 +61,7 @@ public abstract class KerberosRealmTestCase extends ESTestCase { protected KerberosTicketValidator mockKerberosTicketValidator; protected NativeRoleMappingStore mockNativeRoleMappingStore; + protected XPackLicenseState licenseState; protected static final Set roles = Sets.newHashSet("admin", "kibana_user"); @@ -69,6 +73,8 @@ public void setup() throws Exception { globalSettings = Settings.builder().put("path.home", dir).build(); settings = KerberosTestCase.buildKerberosRealmSettings(KerberosTestCase.writeKeyTab(dir.resolve("key.keytab"), "asa").toString(), 100, "10m", true, randomBoolean()); + licenseState = mock(XPackLicenseState.class); + when(licenseState.isAuthorizationRealmAllowed()).thenReturn(true); } @After @@ -102,12 +108,18 @@ protected void assertSuccessAuthenticationResult(final User expectedUser, final } protected KerberosRealm createKerberosRealm(final String... userForRoleMapping) { + return createKerberosRealm(Collections.emptyList(), userForRoleMapping); + } + + protected KerberosRealm createKerberosRealm(final List delegatedRealms, final String... 
userForRoleMapping) { config = new RealmConfig("test-kerb-realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); mockNativeRoleMappingStore = roleMappingStore(Arrays.asList(userForRoleMapping)); mockKerberosTicketValidator = mock(KerberosTicketValidator.class); final KerberosRealm kerberosRealm = new KerberosRealm(config, mockNativeRoleMappingStore, mockKerberosTicketValidator, threadPool, null); + Collections.shuffle(delegatedRealms, random()); + kerberosRealm.initialize(delegatedRealms, licenseState); return kerberosRealm; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java index 80e61a5545fea..d35068fd07af2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java @@ -6,35 +6,38 @@ package org.elasticsearch.xpack.security.authc.kerberos; +import org.apache.lucene.util.Constants; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData; import org.ietf.jgss.GSSException; import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SeekableByteChannel; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.attribute.AclEntry; -import java.nio.file.attribute.AclEntryPermission; -import java.nio.file.attribute.AclEntryType; -import java.nio.file.attribute.AclFileAttributeView; -import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.FileAttribute; +import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.PosixFilePermissions; -import java.nio.file.attribute.UserPrincipal; import java.util.Arrays; -import java.util.List; +import java.util.Collections; +import java.util.EnumSet; import java.util.Locale; import java.util.Set; @@ -47,6 +50,7 @@ import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; @@ -110,52 +114,47 @@ public void testLookupUser() { assertThat(future.actionGet(), 
is(nullValue())); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32768") - public void testKerberosRealmWithInvalidKeytabPathConfigurations() throws IOException { - final String keytabPathCase = randomFrom("keytabPathAsDirectory", "keytabFileDoesNotExist", "keytabPathWithNoReadPermissions"); - final String expectedErrorMessage; - final String keytabPath; - switch (keytabPathCase) { - case "keytabPathAsDirectory": - final String dirName = randomAlphaOfLength(5); - Files.createDirectory(dir.resolve(dirName)); - keytabPath = dir.resolve(dirName).toString(); - expectedErrorMessage = "configured service key tab file [" + keytabPath + "] is a directory"; - break; - case "keytabFileDoesNotExist": - keytabPath = dir.resolve(randomAlphaOfLength(5) + ".keytab").toString(); - expectedErrorMessage = "configured service key tab file [" + keytabPath + "] does not exist"; - break; - case "keytabPathWithNoReadPermissions": - final String fileName = randomAlphaOfLength(5); - final Path keytabFilePath = Files.createTempFile(dir, fileName, ".keytab"); - Files.write(keytabFilePath, randomAlphaOfLength(5).getBytes(StandardCharsets.UTF_8)); - final Set supportedAttributes = keytabFilePath.getFileSystem().supportedFileAttributeViews(); - if (supportedAttributes.contains("posix")) { - final PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(keytabFilePath, PosixFileAttributeView.class); - fileAttributeView.setPermissions(PosixFilePermissions.fromString("---------")); - } else if (supportedAttributes.contains("acl")) { - final UserPrincipal principal = Files.getOwner(keytabFilePath); - final AclFileAttributeView view = Files.getFileAttributeView(keytabFilePath, AclFileAttributeView.class); - final AclEntry entry = AclEntry.newBuilder() - .setType(AclEntryType.DENY) - .setPrincipal(principal) - .setPermissions(AclEntryPermission.READ_DATA, AclEntryPermission.READ_ATTRIBUTES).build(); - final List acl = view.getAcl(); - acl.add(0, entry); - view.setAcl(acl); - } else { - throw new UnsupportedOperationException( - String.format(Locale.ROOT, "Don't know how to make file [%s] non-readable on a file system with attributes [%s]", - keytabFilePath, supportedAttributes)); + public void testKerberosRealmThrowsErrorWhenKeytabPathIsConfiguredAsDirectory() throws IOException { + final String dirName = randomAlphaOfLength(5); + Files.createDirectory(dir.resolve(dirName)); + final String keytabPath = dir.resolve(dirName).toString(); + final String expectedErrorMessage = "configured service key tab file [" + keytabPath + "] is a directory"; + + assertKerberosRealmConstructorFails(keytabPath, expectedErrorMessage); + } + + public void testKerberosRealmThrowsErrorWhenKeytabFileDoesNotExist() throws IOException { + final String keytabPath = dir.resolve(randomAlphaOfLength(5) + ".keytab").toString(); + final String expectedErrorMessage = "configured service key tab file [" + keytabPath + "] does not exist"; + + assertKerberosRealmConstructorFails(keytabPath, expectedErrorMessage); + } + + public void testKerberosRealmThrowsErrorWhenKeytabFileHasNoReadPermissions() throws IOException { + assumeFalse("Not running this test on Windows, as it requires additional access permissions for test framework.", + Constants.WINDOWS); + final Set supportedAttributes = dir.getFileSystem().supportedFileAttributeViews(); + final String keytabFileName = randomAlphaOfLength(5) + ".keytab"; + final Path keytabPath; + if (supportedAttributes.contains("posix")) { + final Set filePerms = 
PosixFilePermissions.fromString("---------"); + final FileAttribute> fileAttributes = PosixFilePermissions.asFileAttribute(filePerms); + try (SeekableByteChannel byteChannel = Files.newByteChannel(dir.resolve(keytabFileName), + EnumSet.of(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), fileAttributes)) { + byteChannel.write(ByteBuffer.wrap(randomByteArrayOfLength(10))); } - keytabPath = keytabFilePath.toString(); - expectedErrorMessage = "configured service key tab file [" + keytabPath + "] must have read permission"; - break; - default: - throw new IllegalArgumentException("Unknown test case :" + keytabPathCase); + keytabPath = dir.resolve(keytabFileName); + } else { + throw new UnsupportedOperationException( + String.format(Locale.ROOT, "Don't know how to make file [%s] non-readable on a file system with attributes [%s]", + dir.resolve(keytabFileName), supportedAttributes)); } + final String expectedErrorMessage = "configured service key tab file [" + keytabPath + "] must have read permission"; + + assertKerberosRealmConstructorFails(keytabPath.toString(), expectedErrorMessage); + } + private void assertKerberosRealmConstructorFails(final String keytabPath, final String expectedErrorMessage) { settings = KerberosTestCase.buildKerberosRealmSettings(keytabPath, 100, "10m", true, randomBoolean()); config = new RealmConfig("test-kerb-realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); @@ -165,4 +164,38 @@ public void testKerberosRealmWithInvalidKeytabPathConfigurations() throws IOExce () -> new KerberosRealm(config, mockNativeRoleMappingStore, mockKerberosTicketValidator, threadPool, null)); assertThat(iae.getMessage(), is(equalTo(expectedErrorMessage))); } + + public void testDelegatedAuthorization() throws Exception { + final String username = randomPrincipalName(); + final String expectedUsername = maybeRemoveRealmName(username); + final MockLookupRealm otherRealm = spy(new MockLookupRealm(new RealmConfig("other_realm", Settings.EMPTY, globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)))); + final User lookupUser = new User(expectedUsername, new String[] { "admin-role" }, expectedUsername, + expectedUsername + "@example.com", Collections.singletonMap("k1", "v1"), true); + otherRealm.registerUser(lookupUser); + + settings = Settings.builder().put(settings).putList("authorization_realms", "other_realm").build(); + final KerberosRealm kerberosRealm = createKerberosRealm(Collections.singletonList(otherRealm), username); + final User expectedUser = lookupUser; + final byte[] decodedTicket = "base64encodedticket".getBytes(StandardCharsets.UTF_8); + final Path keytabPath = config.env().configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(config.settings())); + final boolean krbDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings()); + mockKerberosTicketValidator(decodedTicket, keytabPath, krbDebug, new Tuple<>(username, "out-token"), null); + final KerberosAuthenticationToken kerberosAuthenticationToken = new KerberosAuthenticationToken(decodedTicket); + + PlainActionFuture future = new PlainActionFuture<>(); + kerberosRealm.authenticate(kerberosAuthenticationToken, future); + assertSuccessAuthenticationResult(expectedUser, "out-token", future.actionGet()); + + future = new PlainActionFuture<>(); + kerberosRealm.authenticate(kerberosAuthenticationToken, future); + assertSuccessAuthenticationResult(expectedUser, "out-token", 
future.actionGet()); + + verify(mockKerberosTicketValidator, times(2)).validateTicket(aryEq(decodedTicket), eq(keytabPath), eq(krbDebug), + any(ActionListener.class)); + verify(mockNativeRoleMappingStore).refreshRealmOnChange(kerberosRealm); + verifyNoMoreInteractions(mockKerberosTicketValidator, mockNativeRoleMappingStore); + verify(otherRealm, times(2)).lookupUser(eq(expectedUsername), any(ActionListener.class)); + } } + diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java index 0f88148a9a973..f97afc1d52c2d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java @@ -72,6 +72,15 @@ public abstract class KerberosTestCase extends ESTestCase { unsupportedLocaleLanguages.add("ks"); unsupportedLocaleLanguages.add("ckb"); unsupportedLocaleLanguages.add("ne"); + unsupportedLocaleLanguages.add("dz"); + unsupportedLocaleLanguages.add("mzn"); + unsupportedLocaleLanguages.add("mr"); + unsupportedLocaleLanguages.add("as"); + unsupportedLocaleLanguages.add("bn"); + unsupportedLocaleLanguages.add("lrc"); + unsupportedLocaleLanguages.add("my"); + unsupportedLocaleLanguages.add("ps"); + unsupportedLocaleLanguages.add("ur"); } @BeforeClass diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java index a8f555bc3a392..2f5147ca2b17d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.TestUtils; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -35,7 +37,7 @@ import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.VerificationMode; import org.elasticsearch.xpack.security.authc.ldap.ActiveDirectorySessionFactory.DownLevelADAuthenticator; @@ -48,6 +50,7 @@ import java.security.AccessController; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -91,6 +94,7 @@ public class ActiveDirectoryRealmTests extends ESTestCase { private ThreadPool threadPool; private Settings globalSettings; private SSLService sslService; + private XPackLicenseState licenseState; @BeforeClass public static void setNumberOfLdapServers() { @@ -125,6 +129,7 @@ public void start() 
throws Exception { resourceWatcherService = new ResourceWatcherService(Settings.EMPTY, threadPool); globalSettings = Settings.builder().put("path.home", createTempDir()).build(); sslService = new SSLService(globalSettings, TestEnvironment.newEnvironment(globalSettings)); + licenseState = new TestUtils.UpdatableLicenseState(); } @After @@ -163,6 +168,7 @@ public void testAuthenticateUserPrincipleName() throws Exception { ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); PlainActionFuture future = new PlainActionFuture<>(); realm.authenticate(new UsernamePasswordToken("CN=ironman", new SecureString(PASSWORD)), future); @@ -179,6 +185,7 @@ public void testAuthenticateSAMAccountName() throws Exception { ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); // Thor does not have a UPN of form CN=Thor@ad.test.elasticsearch.com PlainActionFuture future = new PlainActionFuture<>(); @@ -203,6 +210,7 @@ public void testAuthenticateCachesSuccessfulAuthentications() throws Exception { ActiveDirectorySessionFactory sessionFactory = spy(new ActiveDirectorySessionFactory(config, sslService, threadPool)); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); int count = randomIntBetween(2, 10); for (int i = 0; i < count; i++) { @@ -221,6 +229,7 @@ public void testAuthenticateCachingCanBeDisabled() throws Exception { ActiveDirectorySessionFactory sessionFactory = spy(new ActiveDirectorySessionFactory(config, sslService, threadPool)); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); int count = randomIntBetween(2, 10); for (int i = 0; i < count; i++) { @@ -239,6 +248,7 @@ public void testAuthenticateCachingClearsCacheOnRoleMapperRefresh() throws Excep ActiveDirectorySessionFactory sessionFactory = spy(new ActiveDirectorySessionFactory(config, sslService, threadPool)); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); int count = randomIntBetween(2, 10); for (int i = 0; i < count; i++) { @@ -287,6 +297,7 @@ private void doUnauthenticatedLookup(boolean pooled) throws Exception { try (ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool)) { DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + 
realm.initialize(Collections.singleton(realm), licenseState); PlainActionFuture future = new PlainActionFuture<>(); realm.lookupUser("CN=Thor", future); @@ -304,6 +315,7 @@ public void testRealmMapsGroupsToRoles() throws Exception { ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); PlainActionFuture future = new PlainActionFuture<>(); realm.authenticate(new UsernamePasswordToken("CN=ironman", new SecureString(PASSWORD)), future); @@ -320,6 +332,7 @@ public void testRealmMapsUsersToRoles() throws Exception { ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); PlainActionFuture future = new PlainActionFuture<>(); realm.authenticate(new UsernamePasswordToken("CN=Thor", new SecureString(PASSWORD)), future); @@ -338,6 +351,7 @@ public void testRealmUsageStats() throws Exception { ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + realm.initialize(Collections.singleton(realm), licenseState); PlainActionFuture> future = new PlainActionFuture<>(); realm.usageStats(future); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/CancellableLdapRunnableTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/CancellableLdapRunnableTests.java index 2807c501a5d6d..18b84df6d6185 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/CancellableLdapRunnableTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/CancellableLdapRunnableTests.java @@ -8,7 +8,7 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.ldap.LdapRealm.CancellableLdapRunnable; import java.util.concurrent.CountDownLatch; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java index 5c98e2347cf12..fb20527575df9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.threadpool.TestThreadPool; import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; @@ -25,18 +26,21 @@ import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.VerificationMode; import org.elasticsearch.xpack.security.authc.ldap.support.LdapTestCase; import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; import org.elasticsearch.xpack.security.authc.support.DnRoleMapper; +import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; import org.junit.After; import org.junit.Before; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -50,11 +54,14 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class LdapRealmTests extends LdapTestCase { @@ -68,6 +75,7 @@ public class LdapRealmTests extends LdapTestCase { private ResourceWatcherService resourceWatcherService; private Settings defaultGlobalSettings; private SSLService sslService; + private XPackLicenseState licenseState; @Before public void init() throws Exception { @@ -75,6 +83,8 @@ public void init() throws Exception { resourceWatcherService = new ResourceWatcherService(Settings.EMPTY, threadPool); defaultGlobalSettings = Settings.builder().put("path.home", createTempDir()).build(); sslService = new SSLService(defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings)); + licenseState = mock(XPackLicenseState.class); + when(licenseState.isAuthorizationRealmAllowed()).thenReturn(true); } @After @@ -87,10 +97,12 @@ public void testAuthenticateSubTreeGroupSearch() throws Exception { String groupSearchBase = "o=sevenSeas"; String userTemplate = VALID_USER_TEMPLATE; Settings settings = buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE); - RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); + RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, + TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool); + ldap.initialize(Collections.singleton(ldap), licenseState); PlainActionFuture 
future = new PlainActionFuture<>(); ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); @@ -111,11 +123,13 @@ public void testAuthenticateOneLevelGroupSearch() throws Exception { Settings settings = Settings.builder() .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL)) .build(); - RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); + RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, + TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool); + ldap.initialize(Collections.singleton(ldap), licenseState); PlainActionFuture future = new PlainActionFuture<>(); ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); @@ -136,12 +150,14 @@ public void testAuthenticateCaching() throws Exception { Settings settings = Settings.builder() .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) .build(); - RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); + RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, + TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); ldapFactory = spy(ldapFactory); LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool); + ldap.initialize(Collections.singleton(ldap), licenseState); PlainActionFuture future = new PlainActionFuture<>(); ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); @@ -161,12 +177,15 @@ public void testAuthenticateCachingRefresh() throws Exception { Settings settings = Settings.builder() .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) .build(); - RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); + RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, + TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); DnRoleMapper roleMapper = buildGroupAsRoleMapper(resourceWatcherService); ldapFactory = spy(ldapFactory); LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, roleMapper, threadPool); + ldap.initialize(Collections.singleton(ldap), licenseState); + PlainActionFuture future = new PlainActionFuture<>(); ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); future.actionGet(); @@ -194,12 +213,15 @@ public void testAuthenticateNoncaching() throws Exception { .put(buildLdapSettings(ldapUrls(), userTemplate, 
groupSearchBase, LdapSearchScope.SUB_TREE)) .put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), -1) .build(); - RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); + RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, + TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); ldapFactory = spy(ldapFactory); LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool); + ldap.initialize(Collections.singleton(ldap), licenseState); + PlainActionFuture future = new PlainActionFuture<>(); ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); future.actionGet(); @@ -211,6 +233,48 @@ public void testAuthenticateNoncaching() throws Exception { verify(ldapFactory, times(2)).session(anyString(), any(SecureString.class), any(ActionListener.class)); } + public void testDelegatedAuthorization() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userTemplate = VALID_USER_TEMPLATE; + final Settings.Builder builder = Settings.builder() + .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) + .putList(DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey(), "mock_lookup"); + + if (randomBoolean()) { + // maybe disable caching + builder.put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), -1); + } + + final Settings realmSettings = builder.build(); + final Environment env = TestEnvironment.newEnvironment(defaultGlobalSettings); + RealmConfig config = new RealmConfig("test-ldap-realm", realmSettings, defaultGlobalSettings, env, threadPool.getThreadContext()); + + final LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); + final DnRoleMapper roleMapper = buildGroupAsRoleMapper(resourceWatcherService); + final LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, roleMapper, threadPool); + + final MockLookupRealm mockLookup = new MockLookupRealm(new RealmConfig("mock_lookup", Settings.EMPTY, defaultGlobalSettings, env, + threadPool.getThreadContext())); + + ldap.initialize(Arrays.asList(ldap, mockLookup), licenseState); + mockLookup.initialize(Arrays.asList(ldap, mockLookup), licenseState); + + PlainActionFuture future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); + final AuthenticationResult result1 = future.actionGet(); + assertThat(result1.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE)); + assertThat(result1.getMessage(), + equalTo("the principal [" + VALID_USERNAME + "] was authenticated, but no user could be found in realms [mock/mock_lookup]")); + + future = new PlainActionFuture<>(); + final User fakeUser = new User(VALID_USERNAME, "fake_role"); + mockLookup.registerUser(fakeUser); + ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); + final AuthenticationResult result2 = future.actionGet(); + assertThat(result2.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS)); + assertThat(result2.getUser(), sameInstance(fakeUser)); + } + public void testLdapRealmSelectsLdapSessionFactory() throws 
Exception { String groupSearchBase = "o=sevenSeas"; String userTemplate = VALID_USER_TEMPLATE; @@ -279,7 +343,8 @@ public void testLdapRealmThrowsExceptionForUserTemplateAndSearchSettings() throw .put("group_search.scope", LdapSearchScope.SUB_TREE) .put("ssl.verification_mode", VerificationMode.CERTIFICATE) .build(); - RealmConfig config = new RealmConfig("test-ldap-realm-user-search", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); + RealmConfig config = new RealmConfig("test-ldap-realm-user-search", settings, defaultGlobalSettings, + TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> LdapRealm.sessionFactory(config, null, threadPool, LdapRealmSettings.LDAP_TYPE)); assertThat(e.getMessage(), @@ -295,7 +360,8 @@ public void testLdapRealmThrowsExceptionWhenNeitherUserTemplateNorSearchSettings .put("group_search.scope", LdapSearchScope.SUB_TREE) .put("ssl.verification_mode", VerificationMode.CERTIFICATE) .build(); - RealmConfig config = new RealmConfig("test-ldap-realm-user-search", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); + RealmConfig config = new RealmConfig("test-ldap-realm-user-search", settings, defaultGlobalSettings, + TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> LdapRealm.sessionFactory(config, null, threadPool, LdapRealmSettings.LDAP_TYPE)); assertThat(e.getMessage(), @@ -312,11 +378,13 @@ public void testLdapRealmMapsUserDNToRole() throws Exception { .put(DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.getKey(), getDataPath("/org/elasticsearch/xpack/security/authc/support/role_mapping.yml")) .build(); - RealmConfig config = new RealmConfig("test-ldap-realm-userdn", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); + RealmConfig config = new RealmConfig("test-ldap-realm-userdn", settings, defaultGlobalSettings, + TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, new DnRoleMapper(config, resourceWatcherService), threadPool); + ldap.initialize(Collections.singleton(ldap), licenseState); PlainActionFuture future = new PlainActionFuture<>(); ldap.authenticate(new UsernamePasswordToken("Horatio Hornblower", new SecureString(PASSWORD)), future); @@ -339,10 +407,12 @@ public void testLdapConnectionFailureIsTreatedAsAuthenticationFailure() throws E String groupSearchBase = "o=sevenSeas"; String userTemplate = VALID_USER_TEMPLATE; Settings settings = buildLdapSettings(new String[] { url.toString() }, userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE); - RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); + RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, + TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); LdapSessionFactory ldapFactory = new 
LdapSessionFactory(config, sslService, threadPool); LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool); + ldap.initialize(Collections.singleton(ldap), licenseState); PlainActionFuture future = new PlainActionFuture<>(); ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); @@ -386,6 +456,7 @@ public void testUsageStats() throws Exception { LdapSessionFactory ldapFactory = new LdapSessionFactory(config, new SSLService(globalSettings, env), threadPool); LdapRealm realm = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, new DnRoleMapper(config, resourceWatcherService), threadPool); + realm.initialize(Collections.singleton(realm), licenseState); PlainActionFuture> future = new PlainActionFuture<>(); realm.usageStats(future); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java index 2410d8c464997..8d4c5d75c7351 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java @@ -12,17 +12,21 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.support.NoOpLogger; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; +import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.junit.Before; import org.mockito.Mockito; @@ -43,9 +47,11 @@ import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -56,12 +62,15 @@ public class PkiRealmTests extends ESTestCase { private Settings globalSettings; + private XPackLicenseState licenseState; @Before public void setup() throws Exception { globalSettings = Settings.builder() .put("path.home", createTempDir()) .build(); + licenseState = mock(XPackLicenseState.class); + when(licenseState.isAuthorizationRealmAllowed()).thenReturn(true); } public void testTokenSupport() { @@ 
-98,28 +107,14 @@ public void testAuthenticateWithRoleMapping() throws Exception { } private void assertSuccessfulAuthentication(Set roles) throws Exception { - String dn = "CN=Elasticsearch Test Node,"; - final String expectedUsername = "Elasticsearch Test Node"; - X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); - X509AuthenticationToken token = new X509AuthenticationToken(new X509Certificate[] { certificate }, "Elasticsearch Test Node", dn); - UserRoleMapper roleMapper = mock(UserRoleMapper.class); - PkiRealm realm = new PkiRealm(new RealmConfig("", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(globalSettings)), roleMapper); + X509AuthenticationToken token = buildToken(); + UserRoleMapper roleMapper = buildRoleMapper(roles, token.dn()); + PkiRealm realm = buildRealm(roleMapper, Settings.EMPTY); verify(roleMapper).refreshRealmOnChange(realm); - Mockito.doAnswer(invocation -> { - final UserRoleMapper.UserData userData = (UserRoleMapper.UserData) invocation.getArguments()[0]; - final ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; - if (userData.getDn().equals(dn)) { - listener.onResponse(roles); - } else { - listener.onFailure(new IllegalArgumentException("Expected DN '" + dn + "' but was '" + userData + "'")); - } - return null; - }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); - PlainActionFuture future = new PlainActionFuture<>(); - realm.authenticate(token, future); - final AuthenticationResult result = future.actionGet(); + final String expectedUsername = token.principal(); + final AuthenticationResult result = authenticate(token, realm); + final PlainActionFuture future; assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); User user = result.getUser(); assertThat(user, is(notNullValue())); @@ -149,17 +144,54 @@ private void assertSuccessfulAuthentication(Set roles) throws Exception verifyNoMoreInteractions(roleMapper); } + private UserRoleMapper buildRoleMapper(Set roles, String dn) { + UserRoleMapper roleMapper = mock(UserRoleMapper.class); + Mockito.doAnswer(invocation -> { + final UserRoleMapper.UserData userData = (UserRoleMapper.UserData) invocation.getArguments()[0]; + final ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; + if (userData.getDn().equals(dn)) { + listener.onResponse(roles); + } else { + listener.onFailure(new IllegalArgumentException("Expected DN '" + dn + "' but was '" + userData + "'")); + } + return null; + }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + return roleMapper; + } + + private PkiRealm buildRealm(UserRoleMapper roleMapper, Settings realmSettings, Realm... 
otherRealms) { + PkiRealm realm = new PkiRealm(new RealmConfig("", realmSettings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)), roleMapper); + List allRealms = CollectionUtils.arrayAsArrayList(otherRealms); + allRealms.add(realm); + Collections.shuffle(allRealms, random()); + realm.initialize(allRealms, licenseState); + return realm; + } + + private X509AuthenticationToken buildToken() throws Exception { + X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + return new X509AuthenticationToken(new X509Certificate[]{certificate}, "Elasticsearch Test Node", "CN=Elasticsearch Test Node,"); + } + + private AuthenticationResult authenticate(X509AuthenticationToken token, PkiRealm realm) { + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(token, future); + return future.actionGet(); + } + public void testCustomUsernamePattern() throws Exception { + ThreadContext threadContext = new ThreadContext(globalSettings); X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); UserRoleMapper roleMapper = mock(UserRoleMapper.class); - PkiRealm realm = new PkiRealm(new RealmConfig("", Settings.builder().put("username_pattern", "OU=(.*?),").build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)), - roleMapper); + PkiRealm realm = new PkiRealm(new RealmConfig("", Settings.builder().put("username_pattern", "OU=(.*?),").build(), globalSettings, + TestEnvironment.newEnvironment(globalSettings), threadContext), roleMapper); + realm.initialize(Collections.emptyList(), licenseState); Mockito.doAnswer(invocation -> { ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; listener.onResponse(Collections.emptySet()); return null; }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); X509AuthenticationToken token = realm.token(threadContext); @@ -182,15 +214,16 @@ public void testVerificationUsingATruststore() throws Exception { .put("truststore.path", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) .setSecureSettings(secureSettings) .build(); + ThreadContext threadContext = new ThreadContext(globalSettings); PkiRealm realm = new PkiRealm(new RealmConfig("", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(globalSettings)), roleMapper); + threadContext), roleMapper); + realm.initialize(Collections.emptyList(), licenseState); Mockito.doAnswer(invocation -> { ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; listener.onResponse(Collections.emptySet()); return null; }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); X509AuthenticationToken token = realm.token(threadContext); @@ -213,15 +246,16 @@ public void testVerificationFailsUsingADifferentTruststore() throws Exception { getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.jks")) 
.setSecureSettings(secureSettings) .build(); + final ThreadContext threadContext = new ThreadContext(globalSettings); PkiRealm realm = new PkiRealm(new RealmConfig("", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(globalSettings)), roleMapper); + threadContext), roleMapper); + realm.initialize(Collections.emptyList(), licenseState); Mockito.doAnswer(invocation -> { ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; listener.onResponse(Collections.emptySet()); return null; }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); X509AuthenticationToken token = realm.token(threadContext); @@ -307,6 +341,33 @@ public void testPKIRealmSettingsPassValidation() throws Exception { assertSettingDeprecationsAndWarnings(new Setting[] { SSLConfigurationSettings.withoutPrefix().legacyTruststorePassword }); } + public void testDelegatedAuthorization() throws Exception { + final X509AuthenticationToken token = buildToken(); + + final MockLookupRealm otherRealm = new MockLookupRealm(new RealmConfig("other_realm", Settings.EMPTY, globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))); + final User lookupUser = new User(token.principal()); + otherRealm.registerUser(lookupUser); + + final Settings realmSettings = Settings.builder() + .putList("authorization_realms", "other_realm") + .build(); + final UserRoleMapper roleMapper = buildRoleMapper(Collections.emptySet(), token.dn()); + final PkiRealm pkiRealm = buildRealm(roleMapper, realmSettings, otherRealm); + + AuthenticationResult result = authenticate(token, pkiRealm); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS)); + assertThat(result.getUser(), sameInstance(lookupUser)); + + // check that the authorizing realm is consulted even for cached principals + final User lookupUser2 = new User(token.principal()); + otherRealm.registerUser(lookupUser2); + + result = authenticate(token, pkiRealm); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS)); + assertThat(result.getUser(), sameInstance(lookupUser2)); + } + static X509Certificate readCert(Path path) throws Exception { try (InputStream in = Files.newInputStream(path)) { CertificateFactory factory = CertificateFactory.getInstance("X.509"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java index 980abc46831c6..2ecfdb50230bf 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java @@ -14,18 +14,24 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.Realm; import 
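// Sketch (not part of the patch): the realm settings that the new testDelegatedAuthorization()
// above relies on. "other_realm" is the name given to the MockLookupRealm registered next to the
// PKI realm; token and otherRealm are the variables defined in that test.
Settings realmSettings = Settings.builder()
        .putList("authorization_realms", "other_realm")
        .build();
PkiRealm pkiRealm = buildRealm(buildRoleMapper(Collections.emptySet(), token.dn()), realmSettings, otherRealm);
// The PKI realm still verifies the client certificate itself, but the resulting User (and its
// roles) now comes from "other_realm" rather than from role mapping.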
org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; import org.elasticsearch.xpack.core.ssl.PemUtils; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.TestsSSLService; +import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.hamcrest.Matchers; import org.junit.Before; import org.mockito.Mockito; import org.opensaml.saml.common.xml.SAMLConstants; @@ -71,6 +77,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * Basic unit tests for the SAMLRealm @@ -83,9 +90,16 @@ public class SamlRealmTests extends SamlTestCase { private static final String REALM_NAME = "my-saml"; private static final String REALM_SETTINGS_PREFIX = "xpack.security.authc.realms." + REALM_NAME; + private Settings globalSettings; + private Environment env; + private ThreadContext threadContext; + @Before - public void initRealm() throws PrivilegedActionException { + public void setupEnv() throws PrivilegedActionException { SamlUtils.initialize(logger); + globalSettings = Settings.builder().put("path.home", createTempDir()).build(); + env = TestEnvironment.newEnvironment(globalSettings); + threadContext = new ThreadContext(globalSettings); } public void testReadIdpMetadataFromFile() throws Exception { @@ -140,15 +154,70 @@ public void testReadIdpMetadataFromHttps() throws Exception { } public void testAuthenticateWithRoleMapping() throws Exception { + final UserRoleMapper roleMapper = mock(UserRoleMapper.class); + AtomicReference userData = new AtomicReference<>(); + Mockito.doAnswer(invocation -> { + assert invocation.getArguments().length == 2; + userData.set((UserRoleMapper.UserData) invocation.getArguments()[0]); + ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; + listener.onResponse(Collections.singleton("superuser")); + return null; + }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + final boolean useNameId = randomBoolean(); final boolean principalIsEmailAddress = randomBoolean(); final Boolean populateUserMetadata = randomFrom(Boolean.TRUE, Boolean.FALSE, null); + + AuthenticationResult result = performAuthentication(roleMapper, useNameId, principalIsEmailAddress, populateUserMetadata, false); + assertThat(result.getUser().roles(), arrayContainingInAnyOrder("superuser")); + if (populateUserMetadata == Boolean.FALSE) { + // TODO : "saml_nameid" should be null too, but the logout code requires it for now. + assertThat(result.getUser().metadata().get("saml_uid"), nullValue()); + } else { + final String nameIdValue = principalIsEmailAddress ? "clint.barton@shield.gov" : "clint.barton"; + final String uidValue = principalIsEmailAddress ? 
"cbarton@shield.gov" : "cbarton"; + assertThat(result.getUser().metadata().get("saml_nameid"), equalTo(nameIdValue)); + assertThat(result.getUser().metadata().get("saml_uid"), instanceOf(Iterable.class)); + assertThat((Iterable) result.getUser().metadata().get("saml_uid"), contains(uidValue)); + } + + assertThat(userData.get().getUsername(), equalTo(useNameId ? "clint.barton" : "cbarton")); + assertThat(userData.get().getGroups(), containsInAnyOrder("avengers", "shield")); + } + + public void testAuthenticateWithAuthorizingRealm() throws Exception { final UserRoleMapper roleMapper = mock(UserRoleMapper.class); + Mockito.doAnswer(invocation -> { + assert invocation.getArguments().length == 2; + ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; + listener.onFailure(new RuntimeException("Role mapping should not be called")); + return null; + }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + + final boolean useNameId = randomBoolean(); + final boolean principalIsEmailAddress = randomBoolean(); + + AuthenticationResult result = performAuthentication(roleMapper, useNameId, principalIsEmailAddress, null, true); + assertThat(result.getUser().roles(), arrayContainingInAnyOrder("lookup_user_role")); + assertThat(result.getUser().fullName(), equalTo("Clinton Barton")); + assertThat(result.getUser().metadata().entrySet(), Matchers.iterableWithSize(1)); + assertThat(result.getUser().metadata().get("is_lookup"), Matchers.equalTo(true)); + } + + private AuthenticationResult performAuthentication(UserRoleMapper roleMapper, boolean useNameId, boolean principalIsEmailAddress, + Boolean populateUserMetadata, boolean useAuthorizingRealm) throws Exception { final EntityDescriptor idp = mockIdp(); final SpConfiguration sp = new SpConfiguration("", "https://saml/", null, null, null, Collections.emptyList()); final SamlAuthenticator authenticator = mock(SamlAuthenticator.class); final SamlLogoutRequestHandler logoutHandler = mock(SamlLogoutRequestHandler.class); + final String userPrincipal = useNameId ? "clint.barton" : "cbarton"; + final String nameIdValue = principalIsEmailAddress ? "clint.barton@shield.gov" : "clint.barton"; + final String uidValue = principalIsEmailAddress ? "cbarton@shield.gov" : "cbarton"; + + final MockLookupRealm lookupRealm = new MockLookupRealm( + new RealmConfig("mock_lookup", Settings.EMPTY,globalSettings, env, threadContext)); + final Settings.Builder settingsBuilder = Settings.builder() .put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.name(), useNameId ? 
"nameid" : "uid") .put(SamlRealmSettings.GROUPS_ATTRIBUTE.name(), "groups") @@ -161,15 +230,20 @@ public void testAuthenticateWithRoleMapping() throws Exception { if (populateUserMetadata != null) { settingsBuilder.put(SamlRealmSettings.POPULATE_USER_METADATA.getKey(), populateUserMetadata.booleanValue()); } - final Settings realmSettings = settingsBuilder.build(); + if (useAuthorizingRealm) { + settingsBuilder.putList(DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey(), lookupRealm.name()); + lookupRealm.registerUser(new User(userPrincipal, new String[]{ "lookup_user_role" }, "Clinton Barton", "cbarton@shield.gov", + Collections.singletonMap("is_lookup", true), true)); + } + final Settings realmSettings = settingsBuilder.build(); final RealmConfig config = realmConfigFromRealmSettings(realmSettings); - final SamlRealm realm = new SamlRealm(config, roleMapper, authenticator, logoutHandler, () -> idp, sp); + + initializeRealms(realm, lookupRealm); + final SamlToken token = new SamlToken(new byte[0], Collections.singletonList("")); - final String nameIdValue = principalIsEmailAddress ? "clint.barton@shield.gov" : "clint.barton"; - final String uidValue = principalIsEmailAddress ? "cbarton@shield.gov" : "cbarton"; final SamlAttributes attributes = new SamlAttributes( new SamlNameId(NameIDType.PERSISTENT, nameIdValue, idp.getEntityID(), sp.getEntityId(), null), randomAlphaOfLength(16), @@ -178,36 +252,27 @@ public void testAuthenticateWithRoleMapping() throws Exception { new SamlAttributes.SamlAttribute("urn:oid:1.3.6.1.4.1.5923.1.5.1.1", "groups", Arrays.asList("avengers", "shield")), new SamlAttributes.SamlAttribute("urn:oid:0.9.2342.19200300.100.1.3", "mail", Arrays.asList("cbarton@shield.gov")) )); - Mockito.when(authenticator.authenticate(token)).thenReturn(attributes); - - AtomicReference userData = new AtomicReference<>(); - Mockito.doAnswer(invocation -> { - assert invocation.getArguments().length == 2; - userData.set((UserRoleMapper.UserData) invocation.getArguments()[0]); - ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; - listener.onResponse(Collections.singleton("superuser")); - return null; - }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + when(authenticator.authenticate(token)).thenReturn(attributes); final PlainActionFuture future = new PlainActionFuture<>(); realm.authenticate(token, future); final AuthenticationResult result = future.get(); assertThat(result, notNullValue()); assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS)); - assertThat(result.getUser().principal(), equalTo(useNameId ? "clint.barton" : "cbarton")); + assertThat(result.getUser().principal(), equalTo(userPrincipal)); assertThat(result.getUser().email(), equalTo("cbarton@shield.gov")); - assertThat(result.getUser().roles(), arrayContainingInAnyOrder("superuser")); - if (populateUserMetadata == Boolean.FALSE) { - // TODO : "saml_nameid" should be null too, but the logout code requires it for now. - assertThat(result.getUser().metadata().get("saml_uid"), nullValue()); - } else { - assertThat(result.getUser().metadata().get("saml_nameid"), equalTo(nameIdValue)); - assertThat(result.getUser().metadata().get("saml_uid"), instanceOf(Iterable.class)); - assertThat((Iterable) result.getUser().metadata().get("saml_uid"), contains(uidValue)); - } - assertThat(userData.get().getUsername(), equalTo(useNameId ? 
"clint.barton" : "cbarton")); - assertThat(userData.get().getGroups(), containsInAnyOrder("avengers", "shield")); + return result; + } + + private void initializeRealms(Realm... realms) { + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isAuthorizationRealmAllowed()).thenReturn(true); + + final List realmList = Arrays.asList(realms); + for (Realm realm : realms) { + realm.initialize(realmList, licenseState); + } } public void testAttributeSelectionWithRegex() throws Exception { @@ -291,7 +356,7 @@ public void testNonMatchingPrincipalPatternThrowsSamlException() throws Exceptio Collections.singletonList( new SamlAttributes.SamlAttribute("urn:oid:0.9.2342.19200300.100.1.3", "mail", Collections.singletonList(mail)) )); - Mockito.when(authenticator.authenticate(token)).thenReturn(attributes); + when(authenticator.authenticate(token)).thenReturn(attributes); final PlainActionFuture future = new PlainActionFuture<>(); realm.authenticate(token, future); @@ -515,8 +580,8 @@ public void testBuildLogoutRequest() throws Exception { final EntityDescriptor idp = mockIdp(); final IDPSSODescriptor role = mock(IDPSSODescriptor.class); final SingleLogoutService slo = SamlUtils.buildObject(SingleLogoutService.class, SingleLogoutService.DEFAULT_ELEMENT_NAME); - Mockito.when(idp.getRoleDescriptors(IDPSSODescriptor.DEFAULT_ELEMENT_NAME)).thenReturn(Collections.singletonList(role)); - Mockito.when(role.getSingleLogoutServices()).thenReturn(Collections.singletonList(slo)); + when(idp.getRoleDescriptors(IDPSSODescriptor.DEFAULT_ELEMENT_NAME)).thenReturn(Collections.singletonList(role)); + when(role.getSingleLogoutServices()).thenReturn(Collections.singletonList(slo)); slo.setBinding(SAMLConstants.SAML2_REDIRECT_BINDING_URI); slo.setLocation("https://logout.saml/"); @@ -553,7 +618,7 @@ public void testBuildLogoutRequest() throws Exception { private EntityDescriptor mockIdp() { final EntityDescriptor descriptor = mock(EntityDescriptor.class); - Mockito.when(descriptor.getEntityID()).thenReturn("https://idp.saml/"); + when(descriptor.getEntityID()).thenReturn("https://idp.saml/"); return descriptor; } @@ -585,9 +650,7 @@ private Settings.Builder buildSettings(String idpMetaDataPath) { } private RealmConfig realmConfigFromRealmSettings(Settings realmSettings) { - final Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build(); - final Environment env = TestEnvironment.newEnvironment(globalSettings); - return new RealmConfig(REALM_NAME, realmSettings, globalSettings, env, new ThreadContext(globalSettings)); + return new RealmConfig(REALM_NAME, realmSettings, globalSettings, env, threadContext); } private RealmConfig realmConfigFromGlobalSettings(Settings globalSettings) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java index e6830be18c58a..e9e8908c584a9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import 
org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.junit.After; import org.junit.Before; @@ -31,6 +31,7 @@ import java.util.Locale; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.arrayContaining; @@ -39,6 +40,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; @@ -341,6 +343,33 @@ public void testLookupContract() throws Exception { assertThat(e.getMessage(), containsString("lookup exception")); } + public void testReturnDifferentObjectFromCache() throws Exception { + final AtomicReference userArg = new AtomicReference<>(); + final AtomicReference result = new AtomicReference<>(); + Realm realm = new AlwaysAuthenticateCachingRealm(globalSettings, threadPool) { + @Override + protected void handleCachedAuthentication(User user, ActionListener listener) { + userArg.set(user); + listener.onResponse(result.get()); + } + }; + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("user", new SecureString("pass")), future); + final AuthenticationResult result1 = future.actionGet(); + assertThat(result1, notNullValue()); + assertThat(result1.getUser(), notNullValue()); + assertThat(result1.getUser().principal(), equalTo("user")); + + final AuthenticationResult result2 = AuthenticationResult.success(new User("user")); + result.set(result2); + + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("user", new SecureString("pass")), future); + final AuthenticationResult result3 = future.actionGet(); + assertThat(result3, sameInstance(result2)); + assertThat(userArg.get(), sameInstance(result1.getUser())); + } + public void testSingleAuthPerUserLimit() throws Exception { final String username = "username"; final SecureString password = SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupportTests.java new file mode 100644 index 0000000000000..8f0d360b75975 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupportTests.java @@ -0,0 +1,189 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
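// Sketch (not part of the patch): the hook exercised by the new testReturnDifferentObjectFromCache()
// above. A caching realm subclass can intercept cache hits and answer with a different
// AuthenticationResult (for example, one produced by an authorization realm) instead of the cached
// User; globalSettings and threadPool are the fields of that test class.
Realm realm = new AlwaysAuthenticateCachingRealm(globalSettings, threadPool) {
    @Override
    protected void handleCachedAuthentication(User user, ActionListener<AuthenticationResult> listener) {
        // "user" is the principal found in the cache; respond with whatever result callers should see
        listener.onResponse(AuthenticationResult.success(new User(user.principal())));
    }
};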
+ */ +package org.elasticsearch.xpack.security.authc.support; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.user.User; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.Strings.collectionToDelimitedString; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DelegatedAuthorizationSupportTests extends ESTestCase { + + private List realms; + private Settings globalSettings; + private ThreadContext threadContext; + private Environment env; + + @Before + public void setupRealms() { + globalSettings = Settings.builder() + .put("path.home", createTempDir()) + .build(); + env = TestEnvironment.newEnvironment(globalSettings); + threadContext = new ThreadContext(globalSettings); + + final int realmCount = randomIntBetween(5, 9); + realms = new ArrayList<>(realmCount); + for (int i = 1; i <= realmCount; i++) { + realms.add(new MockLookupRealm(buildRealmConfig("lookup-" + i, Settings.EMPTY))); + } + shuffle(realms); + } + + private List shuffle(List list) { + Collections.shuffle(list, random()); + return list; + } + + private RealmConfig buildRealmConfig(String name, Settings settings) { + return new RealmConfig(name, settings, globalSettings, env, threadContext); + } + + public void testEmptyDelegationList() throws ExecutionException, InterruptedException { + final XPackLicenseState license = getLicenseState(true); + final DelegatedAuthorizationSupport das = new DelegatedAuthorizationSupport(realms, buildRealmConfig("r", Settings.EMPTY), license); + assertThat(das.hasDelegation(), equalTo(false)); + final PlainActionFuture future = new PlainActionFuture<>(); + das.resolve("any", future); + final AuthenticationResult result = future.get(); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE)); + assertThat(result.getUser(), nullValue()); + assertThat(result.getMessage(), equalTo("No [authorization_realms] have been configured")); + } + + public void testMissingRealmInDelegationList() { + final XPackLicenseState license = getLicenseState(true); + final Settings settings = Settings.builder() + .putList("authorization_realms", "no-such-realm") + .build(); + final IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> + new DelegatedAuthorizationSupport(realms, buildRealmConfig("r", settings), license) + ); + assertThat(ex.getMessage(), equalTo("configured authorization realm [no-such-realm] does not exist (or is not enabled)")); + } + + public void 
testDelegationChainsAreRejected() { + final XPackLicenseState license = getLicenseState(true); + final Settings settings = Settings.builder() + .putList("authorization_realms", "lookup-1", "lookup-2", "lookup-3") + .build(); + globalSettings = Settings.builder() + .put(globalSettings) + .putList("xpack.security.authc.realms.lookup-2.authorization_realms", "lookup-1") + .build(); + final IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> + new DelegatedAuthorizationSupport(realms, buildRealmConfig("realm1", settings), license) + ); + assertThat(ex.getMessage(), + equalTo("cannot use realm [mock/lookup-2] as an authorization realm - it is already delegating authorization to [[lookup-1]]")); + } + + public void testMatchInDelegationList() throws Exception { + final XPackLicenseState license = getLicenseState(true); + final List useRealms = shuffle(randomSubsetOf(randomIntBetween(1, realms.size()), realms)); + final Settings settings = Settings.builder() + .putList("authorization_realms", useRealms.stream().map(Realm::name).collect(Collectors.toList())) + .build(); + final User user = new User("my_user"); + randomFrom(useRealms).registerUser(user); + final DelegatedAuthorizationSupport das = new DelegatedAuthorizationSupport(realms, buildRealmConfig("r", settings), license); + assertThat(das.hasDelegation(), equalTo(true)); + final PlainActionFuture future = new PlainActionFuture<>(); + das.resolve("my_user", future); + final AuthenticationResult result = future.get(); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS)); + assertThat(result.getUser(), sameInstance(user)); + } + + public void testRealmsAreOrdered() throws Exception { + final XPackLicenseState license = getLicenseState(true); + final List useRealms = shuffle(randomSubsetOf(randomIntBetween(3, realms.size()), realms)); + final List names = useRealms.stream().map(Realm::name).collect(Collectors.toList()); + final Settings settings = Settings.builder() + .putList("authorization_realms", names) + .build(); + final List users = new ArrayList<>(names.size()); + final String username = randomAlphaOfLength(8); + for (MockLookupRealm r : useRealms) { + final User user = new User(username, "role_" + r.name()); + users.add(user); + r.registerUser(user); + } + + final DelegatedAuthorizationSupport das = new DelegatedAuthorizationSupport(realms, buildRealmConfig("r", settings), license); + assertThat(das.hasDelegation(), equalTo(true)); + final PlainActionFuture future = new PlainActionFuture<>(); + das.resolve(username, future); + final AuthenticationResult result = future.get(); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS)); + assertThat(result.getUser(), sameInstance(users.get(0))); + assertThat(result.getUser().roles(), arrayContaining("role_" + useRealms.get(0).name())); + } + + public void testNoMatchInDelegationList() throws Exception { + final XPackLicenseState license = getLicenseState(true); + final List useRealms = shuffle(randomSubsetOf(randomIntBetween(1, realms.size()), realms)); + final Settings settings = Settings.builder() + .putList("authorization_realms", useRealms.stream().map(Realm::name).collect(Collectors.toList())) + .build(); + final DelegatedAuthorizationSupport das = new DelegatedAuthorizationSupport(realms, buildRealmConfig("r", settings), license); + assertThat(das.hasDelegation(), equalTo(true)); + final PlainActionFuture future = new PlainActionFuture<>(); + das.resolve("my_user", future); + final AuthenticationResult 
result = future.get(); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE)); + assertThat(result.getUser(), nullValue()); + assertThat(result.getMessage(), equalTo("the principal [my_user] was authenticated, but no user could be found in realms [" + + collectionToDelimitedString(useRealms.stream().map(Realm::toString).collect(Collectors.toList()), ",") + "]")); + } + + public void testLicenseRejection() throws Exception { + final XPackLicenseState license = getLicenseState(false); + final Settings settings = Settings.builder() + .putList("authorization_realms", realms.get(0).name()) + .build(); + final DelegatedAuthorizationSupport das = new DelegatedAuthorizationSupport(realms, buildRealmConfig("r", settings), license); + assertThat(das.hasDelegation(), equalTo(true)); + final PlainActionFuture future = new PlainActionFuture<>(); + das.resolve("my_user", future); + final AuthenticationResult result = future.get(); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE)); + assertThat(result.getUser(), nullValue()); + assertThat(result.getMessage(), equalTo("authorization_realms are not permitted")); + assertThat(result.getException(), instanceOf(ElasticsearchSecurityException.class)); + assertThat(result.getException().getMessage(), equalTo("current license is non-compliant for [authorization_realms]")); + } + + private XPackLicenseState getLicenseState(boolean authzRealmsAllowed) { + final XPackLicenseState license = mock(XPackLicenseState.class); + when(license.isAuthorizationRealmAllowed()).thenReturn(authzRealmsAllowed); + return license; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/MockLookupRealm.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/MockLookupRealm.java new file mode 100644 index 0000000000000..01700347f5091 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/MockLookupRealm.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
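// Sketch (not part of the patch): the DelegatedAuthorizationSupport API that the tests above
// exercise, using only the constructor, hasDelegation() and resolve() calls shown there; the
// realms and settings variables come from the surrounding test.
XPackLicenseState license = getLicenseState(true);
DelegatedAuthorizationSupport das =
        new DelegatedAuthorizationSupport(realms, buildRealmConfig("r", settings), license);
if (das.hasDelegation()) {
    PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
    das.resolve("authenticated-principal", future);   // illustrative principal
    AuthenticationResult result = future.actionGet();
    // SUCCESS carries the User from the first configured realm that knows the principal;
    // CONTINUE means no authorization realm found it, or the license does not permit delegation.
}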
+ */ + +package org.elasticsearch.xpack.security.authc.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.HashMap; +import java.util.Map; + +public class MockLookupRealm extends Realm { + + private final Map lookup; + + public MockLookupRealm(RealmConfig config) { + super("mock", config); + lookup = new HashMap<>(); + } + + public void registerUser(User user) { + this.lookup.put(user.principal(), user); + } + + @Override + public boolean supports(AuthenticationToken token) { + return false; + } + + @Override + public AuthenticationToken token(ThreadContext context) { + return null; + } + + @Override + public void authenticate(AuthenticationToken token, ActionListener listener) { + listener.onResponse(AuthenticationResult.notHandled()); + } + + @Override + public void lookupUser(String username, ActionListener listener) { + listener.onResponse(lookup.get(username)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RealmUserLookupTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RealmUserLookupTests.java new file mode 100644 index 0000000000000..78be4b3ddf4c7 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RealmUserLookupTests.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
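// Sketch (not part of the patch): MockLookupRealm never authenticates anything; it only answers
// lookupUser() from an in-memory map, which is what makes it usable as a stand-in authorization
// realm in these tests. "cbarton" and the role name are illustrative values.
MockLookupRealm lookup = new MockLookupRealm(
        new RealmConfig("other_realm", Settings.EMPTY, globalSettings, env, threadContext));
lookup.registerUser(new User("cbarton", "lookup_user_role"));
PlainActionFuture<User> future = new PlainActionFuture<>();
lookup.lookupUser("cbarton", future);
assertThat(future.actionGet().principal(), equalTo("cbarton"));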
+ */ + +package org.elasticsearch.xpack.security.authc.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.user.User; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class RealmUserLookupTests extends ESTestCase { + + private Settings globalSettings; + private ThreadContext threadContext; + private Environment env; + + @Before + public void setup() { + globalSettings = Settings.builder() + .put("path.home", createTempDir()) + .build(); + env = TestEnvironment.newEnvironment(globalSettings); + threadContext = new ThreadContext(globalSettings); + } + + public void testNoRealms() throws Exception { + final RealmUserLookup lookup = new RealmUserLookup(Collections.emptyList(), threadContext); + final PlainActionFuture> listener = new PlainActionFuture<>(); + lookup.lookup(randomAlphaOfLengthBetween(3, 12), listener); + final Tuple tuple = listener.get(); + assertThat(tuple, nullValue()); + } + + public void testUserFound() throws Exception { + final List realms = buildRealms(randomIntBetween(5, 9)); + final RealmUserLookup lookup = new RealmUserLookup(realms, threadContext); + + final MockLookupRealm matchRealm = randomFrom(realms); + final User user = new User(randomAlphaOfLength(5)); + matchRealm.registerUser(user); + + final PlainActionFuture> listener = new PlainActionFuture<>(); + lookup.lookup(user.principal(), listener); + final Tuple tuple = listener.get(); + assertThat(tuple, notNullValue()); + assertThat(tuple.v1(), notNullValue()); + assertThat(tuple.v1(), sameInstance(user)); + assertThat(tuple.v2(), notNullValue()); + assertThat(tuple.v2(), sameInstance(matchRealm)); + } + + public void testUserNotFound() throws Exception { + final List realms = buildRealms(randomIntBetween(5, 9)); + final RealmUserLookup lookup = new RealmUserLookup(realms, threadContext); + + final String username = randomAlphaOfLength(5); + + final PlainActionFuture> listener = new PlainActionFuture<>(); + lookup.lookup(username, listener); + final Tuple tuple = listener.get(); + assertThat(tuple, nullValue()); + } + + public void testRealmException() { + final Realm realm = new Realm("test", new RealmConfig("test", Settings.EMPTY, globalSettings, env, threadContext)) { + @Override + public boolean supports(AuthenticationToken token) { + return false; + } + + @Override + public AuthenticationToken token(ThreadContext context) { + return null; + } + + @Override + public void authenticate(AuthenticationToken token, ActionListener listener) { + listener.onResponse(AuthenticationResult.notHandled()); + } + + @Override + public void lookupUser(String username, ActionListener 
listener) { + listener.onFailure(new RuntimeException("FAILURE")); + } + }; + final RealmUserLookup lookup = new RealmUserLookup(Collections.singletonList(realm), threadContext); + final PlainActionFuture> listener = new PlainActionFuture<>(); + lookup.lookup("anyone", listener); + final RuntimeException e = expectThrows(RuntimeException.class, listener::actionGet); + assertThat(e.getMessage(), equalTo("FAILURE")); + } + + private List buildRealms(int realmCount) { + final List realms = new ArrayList<>(realmCount); + for (int i = 1; i <= realmCount; i++) { + final RealmConfig config = new RealmConfig("lookup-" + i, Settings.EMPTY, globalSettings, env, threadContext); + final MockLookupRealm realm = new MockLookupRealm(config); + for (int j = 0; j < 5; j++) { + realm.registerUser(new User(randomAlphaOfLengthBetween(6, 12))); + } + realms.add(realm); + } + Collections.shuffle(realms, random()); + return realms; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 2bee8fa09e381..052ba38551021 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.support.SecurityIndexManager; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 65c558d3d81e2..7722a9d216632 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -122,7 +122,7 @@ import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.audit.AuditTrailService; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java index a581d1abbb5df..9c9f2b1b1a42a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java +++ 
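// Sketch (not part of the patch): RealmUserLookup as exercised by the tests above. It asks the
// supplied realms for the principal and reports both the resolved User and the realm that supplied
// it, or null when no realm knows the user; realms and threadContext come from the test setup.
RealmUserLookup lookup = new RealmUserLookup(realms, threadContext);
PlainActionFuture<Tuple<User, Realm>> future = new PlainActionFuture<>();
lookup.lookup("cbarton", future);
Tuple<User, Realm> hit = future.actionGet();
if (hit != null) {
    User user = hit.v1();      // the looked-up user
    Realm source = hit.v2();   // the realm that returned it
}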
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authc.AuthenticationField; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackSecurityUser; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.junit.Before; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java index d31f9f37b9158..c48ac4568989b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; import org.elasticsearch.xpack.security.support.SecurityIndexManager; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java index cf9c09759ea09..ebced2307978b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -46,12 +46,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; import org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; @@ -60,7 +60,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.user.AnonymousUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackSecurityUser; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.audit.AuditTrailService; diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java index 07686838ad0e2..08e4b1123c70a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.authz; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -18,10 +19,11 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; -import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; import org.elasticsearch.xpack.core.security.support.MetadataUtils; import org.hamcrest.Matchers; @@ -208,7 +210,10 @@ public void testParse() throws Exception { } public void testSerialization() throws Exception { + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, null); + logger.info("Testing serialization with version {}", version); BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(version); RoleDescriptor.IndicesPrivileges[] groups = new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("i1", "i2") @@ -235,6 +240,7 @@ public void testSerialization() throws Exception { final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables()); StreamInput streamInput = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())), registry); + streamInput.setVersion(version); final RoleDescriptor serialized = RoleDescriptor.readFrom(streamInput); assertEquals(descriptor, serialized); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java index 087749da2406a..fac88e8af09b0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authc.AuthenticationField; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.audit.AuditTrailService; import static org.elasticsearch.mock.orig.Mockito.verifyNoMoreInteractions; diff --git 
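// Sketch (not part of the patch): the wire-compatibility pattern used by the updated
// testSerialization() above. The same randomly chosen version is set on both the output and the
// input stream so that version-conditional fields are written and read symmetrically. The write
// step is elided because only the read side is visible in this hunk; registry is the
// NamedWriteableRegistry built in that test.
Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, null);
BytesStreamOutput output = new BytesStreamOutput();
output.setVersion(version);
// ... serialize the RoleDescriptor under test into `output` here ...
StreamInput in = new NamedWriteableAwareStreamInput(
        ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())), registry);
in.setVersion(version);
RoleDescriptor serialized = RoleDescriptor.readFrom(in);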
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java index 825ce4ee44c60..34a0685c2fd21 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java @@ -195,7 +195,7 @@ public void testIndicesPrivilegesStreaming() throws IOException { assertEquals(readIndicesPrivileges, indicesPrivileges.build()); out = new BytesStreamOutput(); - out.setVersion(Version.V_5_0_0); + out.setVersion(Version.V_6_0_0); indicesPrivileges = RoleDescriptor.IndicesPrivileges.builder(); indicesPrivileges.grantedFields(allowed); indicesPrivileges.deniedFields(denied); @@ -205,7 +205,7 @@ public void testIndicesPrivilegesStreaming() throws IOException { indicesPrivileges.build().writeTo(out); out.close(); in = out.bytes().streamInput(); - in.setVersion(Version.V_5_0_0); + in.setVersion(Version.V_6_0_0); RoleDescriptor.IndicesPrivileges readIndicesPrivileges2 = RoleDescriptor.IndicesPrivileges.createFrom(in); assertEquals(readIndicesPrivileges, readIndicesPrivileges2); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java index a2c70db3b63e8..4000969187548 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java @@ -259,7 +259,7 @@ private ClusterState getClusterStateWithSecurityIndex() { .put(IndexMetaData.builder(securityIndexName).settings(settings)) .put(new IndexTemplateMetaData(SecurityIndexManager.SECURITY_TEMPLATE_NAME, 0, 0, Collections.singletonList(securityIndexName), Settings.EMPTY, ImmutableOpenMap.of(), - ImmutableOpenMap.of(), ImmutableOpenMap.of())) + ImmutableOpenMap.of())) .build(); if (withAlias) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java index 05c2882f3ac12..26c59a1ef5470 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java @@ -11,7 +11,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationField; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.ingest.SetSecurityUserProcessor.Property; import java.util.Collections; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index 7d10198c6aea8..c3a6d7e920d1a 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -347,10 +347,10 @@ public void testIndexTemplateVersionMatching() throws Exception { assertTrue(SecurityIndexManager.checkTemplateExistsAndVersionMatches( SecurityIndexManager.SECURITY_TEMPLATE_NAME, clusterState, logger, - Version.V_5_0_0::before)); + Version.V_6_0_0::before)); assertFalse(SecurityIndexManager.checkTemplateExistsAndVersionMatches( SecurityIndexManager.SECURITY_TEMPLATE_NAME, clusterState, logger, - Version.V_5_0_0::after)); + Version.V_6_0_0::after)); } public void testUpToDateMappingsAreIdentifiedAsUpToDate() throws IOException { @@ -448,4 +448,4 @@ private static IndexTemplateMetaData.Builder getIndexTemplateMetaData(String tem } return templateBuilder; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java index 09072f99fc20b..dd7dda48ae813 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -33,7 +33,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authz.AuthorizationService; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java index 08a991eb3ec29..17df337d2916f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java @@ -27,9 +27,8 @@ import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; -import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authz.AuthorizationService; @@ -37,12 +36,10 @@ import java.io.IOException; import java.util.Collections; -import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.mock.orig.Mockito.times; import static org.elasticsearch.xpack.core.security.support.Exceptions.authenticationError; import static 
org.elasticsearch.xpack.core.security.support.Exceptions.authorizationError; -import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; @@ -220,47 +217,6 @@ public void testNodeProfileAllowsNodeActions() throws Exception { verifyNoMoreInteractions(authcService, authzService); } - public void testHandlesKibanaUserCompatibility() throws Exception { - TransportRequest request = mock(TransportRequest.class); - User user = new User("kibana", "kibana"); - Authentication authentication = mock(Authentication.class); - final Version version = Version.fromId(randomIntBetween(Version.V_5_0_0_ID, Version.V_5_2_0_ID - 100)); - when(authentication.getVersion()).thenReturn(version); - when(authentication.getUser()).thenReturn(user); - doAnswer((i) -> { - ActionListener callback = - (ActionListener) i.getArguments()[3]; - callback.onResponse(authentication); - return Void.TYPE; - }).when(authcService).authenticate(eq("_action"), eq(request), eq((User)null), any(ActionListener.class)); - AtomicReference rolesRef = new AtomicReference<>(); - final Role empty = Role.EMPTY; - doAnswer((i) -> { - ActionListener callback = - (ActionListener) i.getArguments()[1]; - rolesRef.set(((User) i.getArguments()[0]).roles()); - callback.onResponse(empty); - return Void.TYPE; - }).when(authzService).roles(any(User.class), any(ActionListener.class)); - ServerTransportFilter filter = getClientOrNodeFilter(); - PlainActionFuture future = new PlainActionFuture<>(); - when(channel.getVersion()).thenReturn(version); - filter.inbound("_action", request, channel, future); - assertNotNull(rolesRef.get()); - assertThat(rolesRef.get(), arrayContaining("kibana_system")); - - // test with a version that doesn't need changing - filter = getClientOrNodeFilter(); - rolesRef.set(null); - user = new KibanaUser(true); - when(authentication.getUser()).thenReturn(user); - when(authentication.getVersion()).thenReturn(Version.V_5_2_0); - future = new PlainActionFuture<>(); - filter.inbound("_action", request, channel, future); - assertNotNull(rolesRef.get()); - assertThat(rolesRef.get(), arrayContaining("kibana_system")); - } - private ServerTransportFilter getClientOrNodeFilter() throws IOException { return randomBoolean() ? 
getNodeFilter(true) : getClientFilter(true); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java index 70ab085fcf72b..7397ebc8c7dc4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; -import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.MockSecureSettings; @@ -35,6 +34,9 @@ import org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.SSLService; +import javax.net.SocketFactory; +import javax.net.ssl.HandshakeCompletedListener; +import javax.net.ssl.SSLSocket; import java.io.IOException; import java.net.InetAddress; import java.net.SocketTimeoutException; @@ -44,10 +46,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; -import javax.net.SocketFactory; -import javax.net.ssl.HandshakeCompletedListener; -import javax.net.ssl.SSLSocket; - import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.containsString; @@ -118,12 +116,6 @@ protected MockTransportService build(Settings settings, Version version, Cluster return transportService; } - @Override - protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException { - TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection; - CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true); - } - public void testConnectException() throws UnknownHostException { try { serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserTests.java index 32816e40e0873..4c72afeb5cee7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserTests.java @@ -9,7 +9,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.user.AnonymousUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.equalTo; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java index 6bea620982fac..68b54198980d6 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java @@ -5,15 +5,13 @@ */ package org.elasticsearch.xpack.security.user; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.InternalUserSerializationHelper; import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; import java.util.Arrays; @@ -60,46 +58,6 @@ public void testWriteToAndReadFromWithRunAs() throws Exception { assertThat(readFromAuthUser.authenticatedUser(), is(authUser)); } - public void testRunAsBackcompatRead() throws Exception { - User user = new User(randomAlphaOfLengthBetween(4, 30), - randomBoolean() ? generateRandomStringArray(20, 30, false) : null); - // store the runAs user as the "authenticationUser" here to mimic old format for writing - User authUser = new User(randomAlphaOfLengthBetween(4, 30), generateRandomStringArray(20, 30, false), user); - - BytesStreamOutput output = new BytesStreamOutput(); - User.writeTo(authUser, output); - StreamInput input = output.bytes().streamInput(); - input.setVersion(randomFrom(Version.V_5_0_0, Version.V_5_4_0)); - User readFrom = User.readFrom(input); - - assertThat(readFrom.principal(), is(user.principal())); - assertThat(Arrays.equals(readFrom.roles(), user.roles()), is(true)); - User readFromAuthUser = readFrom.authenticatedUser(); - assertThat(authUser, is(notNullValue())); - assertThat(readFromAuthUser.principal(), is(authUser.principal())); - assertThat(Arrays.equals(readFromAuthUser.roles(), authUser.roles()), is(true)); - } - - public void testRunAsBackcompatWrite() throws Exception { - User user = new User(randomAlphaOfLengthBetween(4, 30), - randomBoolean() ? 
generateRandomStringArray(20, 30, false) : null); - // store the runAs user as the "authenticationUser" here to mimic old format for writing - User authUser = new User(randomAlphaOfLengthBetween(4, 30), generateRandomStringArray(20, 30, false), user); - - BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(randomFrom(Version.V_5_0_0, Version.V_5_4_0)); - User.writeTo(authUser, output); - StreamInput input = output.bytes().streamInput(); - User readFrom = User.readFrom(input); - - assertThat(readFrom.principal(), is(user.principal())); - assertThat(Arrays.equals(readFrom.roles(), user.roles()), is(true)); - User readFromAuthUser = readFrom.authenticatedUser(); - assertThat(authUser, is(notNullValue())); - assertThat(readFromAuthUser.principal(), is(authUser.principal())); - assertThat(Arrays.equals(readFromAuthUser.roles(), authUser.roles()), is(true)); - } - public void testSystemUserReadAndWrite() throws Exception { BytesStreamOutput output = new BytesStreamOutput(); diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index 039e78c14952c..62097e76b97ea 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -19,7 +19,8 @@ archivesBaseName = 'x-pack-sql' integTest.enabled = false dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') compileOnly(project(':modules:lang-painless')) { // exclude ASM to not affect featureAware task on Java 10+ exclude group: "org.ow2.asm" diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index a0d9b24c50729..1a7d6115e1556 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -8,7 +8,7 @@ archivesBaseName = "x-pack-sql-jdbc" forbiddenApisMain { // does not depend on core, so only jdk and http signatures should be checked - signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } dependencies { diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java index 201ae251ca0df..ebdeaef15cae4 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java @@ -133,72 +133,37 @@ public String getString(int columnIndex) throws SQLException { @Override public boolean getBoolean(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? (Boolean) val : false; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a boolean", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Boolean.class) : false; } @Override public byte getByte(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).byteValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a byte", cce); - } + return column(columnIndex) != null ? 
getObject(columnIndex, Byte.class) : 0; } @Override public short getShort(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).shortValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a short", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Short.class) : 0; } @Override public int getInt(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).intValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to an int", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Integer.class) : 0; } @Override public long getLong(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).longValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a long", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Long.class) : 0; } @Override public float getFloat(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).floatValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a float", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Float.class) : 0; } @Override public double getDouble(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).doubleValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a double", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Double.class) : 0; } @Override @@ -272,15 +237,29 @@ public byte[] getBytes(String columnLabel) throws SQLException { @Override public Date getDate(String columnLabel) throws SQLException { + // TODO: the error message in case the value in the column cannot be converted to a Date refers to a column index + // (for example - "unable to convert column 4 to a long") and not to the column name, which is a bit confusing. + // Should we reconsider this? Maybe by catching the exception here and rethrowing it with the columnLabel instead. return getDate(column(columnLabel)); } private Long dateTime(int columnIndex) throws SQLException { Object val = column(columnIndex); + JDBCType type = cursor.columns().get(columnIndex - 1).type; try { + // TODO: the B6 appendix of the jdbc spec does mention CHAR, VARCHAR, LONGVARCHAR, DATE, TIMESTAMP as supported + // jdbc types that should be handled by getDate and getTime methods. From all of those we support VARCHAR and + // TIMESTAMP. Should we consider the VARCHAR conversion as a later enhancement? + if (JDBCType.TIMESTAMP.equals(type)) { + // the cursor can return an Integer if the date-since-epoch is small enough, XContentParser (Jackson) will + // return the "smallest" data type for numbers when parsing + // TODO: this should probably be handled server side + return val == null ? null : ((Number) val).longValue(); + }; return val == null ? 
null : (Long) val; } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a long", cce); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", val, type.getName()), cce); } } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java index aa9d434f332e3..7b638d8bd094f 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java @@ -10,7 +10,6 @@ import java.sql.Date; import java.sql.JDBCType; -import java.sql.SQLDataException; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.sql.Time; @@ -56,9 +55,10 @@ private TypeConverter() { } - private static final long DAY_IN_MILLIS = 60 * 60 * 24; + private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000; private static final Map, JDBCType> javaToJDBC; - + + static { Map, JDBCType> aMap = Arrays.stream(DataType.values()) .filter(dataType -> dataType.javaClass() != null @@ -119,7 +119,8 @@ private static T dateTimeConvert(Long millis, Calendar c, Function T convert(Object val, JDBCType columnType, Class type) throws SQLE if (type == null) { return (T) convert(val, columnType); } - - if (type.isInstance(val)) { + + // converting a Long to a Timestamp shouldn't be possible according to the spec, + // it feels a little brittle to check this scenario here and I don't particularly like it + // TODO: can we do any better or should we go over the spec and allow getLong(date) to be valid? 
+ if (!(type == Long.class && columnType == JDBCType.TIMESTAMP) && type.isInstance(val)) { try { return type.cast(val); } catch (ClassCastException cce) { - throw new SQLDataException("Unable to convert " + val.getClass().getName() + " to " + columnType, cce); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a %s", val, + columnType.getName(), type.getName()), cce); } } - + if (type == String.class) { return (T) asString(convert(val, columnType)); } @@ -205,7 +210,8 @@ static T convert(Object val, JDBCType columnType, Class type) throws SQLE if (type == OffsetDateTime.class) { return (T) asOffsetDateTime(val, columnType); } - throw new SQLException("Conversion from type [" + columnType + "] to [" + type.getName() + "] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a %s", val, + columnType.getName(), type.getName())); } /** @@ -276,8 +282,8 @@ static boolean isSigned(JDBCType jdbcType) throws SQLException { } return dataType.isSigned(); } - - + + static JDBCType fromJavaToJDBC(Class clazz) throws SQLException { for (Entry, JDBCType> e : javaToJDBC.entrySet()) { // java.util.Calendar from {@code javaToJDBC} is an abstract class and this method can be used with concrete classes as well @@ -285,7 +291,7 @@ static JDBCType fromJavaToJDBC(Class clazz) throws SQLException { return e.getValue(); } } - + throw new SQLFeatureNotSupportedException("Objects of type " + clazz.getName() + " are not supported"); } @@ -336,8 +342,11 @@ private static Boolean asBoolean(Object val, JDBCType columnType) throws SQLExce case FLOAT: case DOUBLE: return Boolean.valueOf(Integer.signum(((Number) val).intValue()) != 0); + case VARCHAR: + return Boolean.valueOf((String) val); default: - throw new SQLException("Conversion from type [" + columnType + "] to [Boolean] not supported"); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Boolean", val, columnType.getName())); } } @@ -355,10 +364,16 @@ private static Byte asByte(Object val, JDBCType columnType) throws SQLException case FLOAT: case DOUBLE: return safeToByte(safeToLong(((Number) val).doubleValue())); + case VARCHAR: + try { + return Byte.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Byte", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Byte] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Byte", val, columnType.getName())); } private static Short asShort(Object val, JDBCType columnType) throws SQLException { @@ -374,10 +389,16 @@ private static Short asShort(Object val, JDBCType columnType) throws SQLExceptio case FLOAT: case DOUBLE: return safeToShort(safeToLong(((Number) val).doubleValue())); + case VARCHAR: + try { + return Short.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Short", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Short] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Short", val, columnType.getName())); } private static Integer asInteger(Object val, JDBCType columnType) throws SQLException { @@ -393,10 +414,18 @@ private 
static Integer asInteger(Object val, JDBCType columnType) throws SQLExce case FLOAT: case DOUBLE: return safeToInt(safeToLong(((Number) val).doubleValue())); + case VARCHAR: + try { + return Integer.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to an Integer", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Integer] not supported"); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to an Integer", val, columnType.getName())); } private static Long asLong(Object val, JDBCType columnType) throws SQLException { @@ -412,12 +441,21 @@ private static Long asLong(Object val, JDBCType columnType) throws SQLException case FLOAT: case DOUBLE: return safeToLong(((Number) val).doubleValue()); - case TIMESTAMP: - return ((Number) val).longValue(); + //TODO: should we support conversion to TIMESTAMP? + //The spec says that getLong() should support the following types conversions: + //TINYINT, SMALLINT, INTEGER, BIGINT, REAL, FLOAT, DOUBLE, DECIMAL, NUMERIC, BIT, BOOLEAN, CHAR, VARCHAR, LONGVARCHAR + //case TIMESTAMP: + // return ((Number) val).longValue(); + case VARCHAR: + try { + return Long.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Long", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Long] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", val, columnType.getName())); } private static Float asFloat(Object val, JDBCType columnType) throws SQLException { @@ -432,11 +470,17 @@ private static Float asFloat(Object val, JDBCType columnType) throws SQLExceptio case REAL: case FLOAT: case DOUBLE: - return new Float(((Number) val).doubleValue()); + return Float.valueOf((((float) ((Number) val).doubleValue()))); + case VARCHAR: + try { + return Float.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Float", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Float] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Float", val, columnType.getName())); } private static Double asDouble(Object val, JDBCType columnType) throws SQLException { @@ -451,32 +495,41 @@ private static Double asDouble(Object val, JDBCType columnType) throws SQLExcept case REAL: case FLOAT: case DOUBLE: - return new Double(((Number) val).doubleValue()); + + return Double.valueOf(((Number) val).doubleValue()); + case VARCHAR: + try { + return Double.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Double", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Double] not supported"); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Double", val, columnType.getName())); } private static Date asDate(Object val, JDBCType columnType) throws SQLException { if (columnType == JDBCType.TIMESTAMP) { return new Date(utcMillisRemoveTime(((Number) val).longValue())); } - throw new 
SQLException("Conversion from type [" + columnType + "] to [Date] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Date", val, columnType.getName())); } private static Time asTime(Object val, JDBCType columnType) throws SQLException { if (columnType == JDBCType.TIMESTAMP) { return new Time(utcMillisRemoveDate(((Number) val).longValue())); } - throw new SQLException("Conversion from type [" + columnType + "] to [Time] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Time", val, columnType.getName())); } private static Timestamp asTimestamp(Object val, JDBCType columnType) throws SQLException { if (columnType == JDBCType.TIMESTAMP) { return new Timestamp(((Number) val).longValue()); } - throw new SQLException("Conversion from type [" + columnType + "] to [Timestamp] not supported"); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Timestamp", val, columnType.getName())); } private static byte[] asByteArray(Object val, JDBCType columnType) { @@ -539,4 +592,4 @@ private static long safeToLong(double x) throws SQLException { } return Math.round(x); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java index ad96825896e1a..35a3ec5748748 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java @@ -25,6 +25,7 @@ import java.util.Locale; import java.util.Map; +import static java.lang.String.format; import static java.sql.JDBCType.BIGINT; import static java.sql.JDBCType.BOOLEAN; import static java.sql.JDBCType.DOUBLE; @@ -38,27 +39,27 @@ import static java.sql.JDBCType.VARCHAR; public class JdbcPreparedStatementTests extends ESTestCase { - + public void testSettingBooleanValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + jps.setBoolean(1, true); assertEquals(true, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, false); assertEquals(false, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, true, Types.BOOLEAN); assertEquals(true, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); assertTrue(value(jps) instanceof Boolean); - + jps.setObject(1, true, Types.INTEGER); assertEquals(1, value(jps)); assertEquals(INTEGER, jdbcType(jps)); - + jps.setObject(1, true, Types.VARCHAR); assertEquals("true", value(jps)); assertEquals(VARCHAR, jdbcType(jps)); @@ -66,264 +67,265 @@ public void testSettingBooleanValues() throws SQLException { public void testThrownExceptionsWhenSettingBooleanValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, true, Types.TIMESTAMP)); - assertEquals("Conversion from type [BOOLEAN] to [Timestamp] not supported", sqle.getMessage()); + assertEquals("Unable to convert value [true] of type [BOOLEAN] to a Timestamp", sqle.getMessage()); } - + public void testSettingStringValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + jps.setString(1, "foo bar"); assertEquals("foo bar", value(jps)); 
assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, "foo bar"); assertEquals("foo bar", value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, "foo bar", Types.VARCHAR); assertEquals("foo bar", value(jps)); assertEquals(VARCHAR, jdbcType(jps)); assertTrue(value(jps) instanceof String); } - + public void testThrownExceptionsWhenSettingStringValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, "foo bar", Types.INTEGER)); - assertEquals("Conversion from type [VARCHAR] to [Integer] not supported", sqle.getMessage()); + assertEquals("Unable to convert value [foo bar] of type [VARCHAR] to an Integer", sqle.getMessage()); } - + public void testSettingByteTypeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + jps.setByte(1, (byte) 6); assertEquals((byte) 6, value(jps)); assertEquals(TINYINT, jdbcType(jps)); - + jps.setObject(1, (byte) 6); assertEquals((byte) 6, value(jps)); assertEquals(TINYINT, jdbcType(jps)); assertTrue(value(jps) instanceof Byte); - + jps.setObject(1, (byte) 0, Types.BOOLEAN); assertEquals(false, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, (byte) 123, Types.BOOLEAN); assertEquals(true, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, (byte) 123, Types.INTEGER); assertEquals(123, value(jps)); assertEquals(INTEGER, jdbcType(jps)); - + jps.setObject(1, (byte) -128, Types.DOUBLE); assertEquals(-128.0, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingByteTypeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, (byte) 6, Types.TIMESTAMP)); - assertEquals("Conversion from type [TINYINT] to [Timestamp] not supported", sqle.getMessage()); + assertEquals("Unable to convert value [6] of type [TINYINT] to a Timestamp", sqle.getMessage()); } - + public void testSettingShortTypeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + short someShort = randomShort(); jps.setShort(1, someShort); assertEquals(someShort, value(jps)); assertEquals(SMALLINT, jdbcType(jps)); - + jps.setObject(1, someShort); assertEquals(someShort, value(jps)); assertEquals(SMALLINT, jdbcType(jps)); assertTrue(value(jps) instanceof Short); - + jps.setObject(1, (short) 1, Types.BOOLEAN); assertEquals(true, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, (short) -32700, Types.DOUBLE); assertEquals(-32700.0, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); - + jps.setObject(1, someShort, Types.INTEGER); assertEquals((int) someShort, value(jps)); assertEquals(INTEGER, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingShortTypeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, (short) 6, Types.TIMESTAMP)); - assertEquals("Conversion from type [SMALLINT] to [Timestamp] not supported", sqle.getMessage()); - + assertEquals("Unable to convert value [6] of type [SMALLINT] to a Timestamp", sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> jps.setObject(1, 256, Types.TINYINT)); assertEquals("Numeric " + 256 + " out of range", sqle.getMessage()); } - + public void testSettingIntegerValues() throws SQLException { 
JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + int someInt = randomInt(); jps.setInt(1, someInt); assertEquals(someInt, value(jps)); assertEquals(INTEGER, jdbcType(jps)); - + jps.setObject(1, someInt); assertEquals(someInt, value(jps)); assertEquals(INTEGER, jdbcType(jps)); assertTrue(value(jps) instanceof Integer); - + jps.setObject(1, someInt, Types.VARCHAR); assertEquals(String.valueOf(someInt), value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, someInt, Types.FLOAT); assertEquals(Double.valueOf(someInt), value(jps)); assertTrue(value(jps) instanceof Double); assertEquals(FLOAT, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingIntegerValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); int someInt = randomInt(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someInt, Types.TIMESTAMP)); - assertEquals("Conversion from type [INTEGER] to [Timestamp] not supported", sqle.getMessage()); - + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [INTEGER] to a Timestamp", someInt), sqle.getMessage()); + Integer randomIntNotShort = randomIntBetween(32768, Integer.MAX_VALUE); sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomIntNotShort, Types.SMALLINT)); assertEquals("Numeric " + randomIntNotShort + " out of range", sqle.getMessage()); - + sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomIntNotShort, Types.TINYINT)); assertEquals("Numeric " + randomIntNotShort + " out of range", sqle.getMessage()); } - + public void testSettingLongValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + long someLong = randomLong(); jps.setLong(1, someLong); assertEquals(someLong, value(jps)); assertEquals(BIGINT, jdbcType(jps)); - + jps.setObject(1, someLong); assertEquals(someLong, value(jps)); assertEquals(BIGINT, jdbcType(jps)); assertTrue(value(jps) instanceof Long); - + jps.setObject(1, someLong, Types.VARCHAR); assertEquals(String.valueOf(someLong), value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, someLong, Types.DOUBLE); assertEquals((double) someLong, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); - + jps.setObject(1, someLong, Types.FLOAT); assertEquals((double) someLong, value(jps)); assertEquals(FLOAT, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingLongValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); long someLong = randomLong(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someLong, Types.TIMESTAMP)); - assertEquals("Conversion from type [BIGINT] to [Timestamp] not supported", sqle.getMessage()); - + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [BIGINT] to a Timestamp", someLong), sqle.getMessage()); + Long randomLongNotShort = randomLongBetween(Integer.MAX_VALUE + 1, Long.MAX_VALUE); sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomLongNotShort, Types.INTEGER)); assertEquals("Numeric " + randomLongNotShort + " out of range", sqle.getMessage()); - + sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomLongNotShort, Types.SMALLINT)); assertEquals("Numeric " + randomLongNotShort + " out of range", sqle.getMessage()); } - + public void testSettingFloatValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + float someFloat = randomFloat(); jps.setFloat(1, someFloat); 
assertEquals(someFloat, value(jps)); assertEquals(REAL, jdbcType(jps)); - + jps.setObject(1, someFloat); assertEquals(someFloat, value(jps)); assertEquals(REAL, jdbcType(jps)); assertTrue(value(jps) instanceof Float); - + jps.setObject(1, someFloat, Types.VARCHAR); assertEquals(String.valueOf(someFloat), value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, someFloat, Types.DOUBLE); assertEquals((double) someFloat, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); - + jps.setObject(1, someFloat, Types.FLOAT); assertEquals((double) someFloat, value(jps)); assertEquals(FLOAT, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingFloatValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); float someFloat = randomFloat(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someFloat, Types.TIMESTAMP)); - assertEquals("Conversion from type [REAL] to [Timestamp] not supported", sqle.getMessage()); - + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [REAL] to a Timestamp", someFloat), sqle.getMessage()); + Float floatNotInt = 5_155_000_000f; sqle = expectThrows(SQLException.class, () -> jps.setObject(1, floatNotInt, Types.INTEGER)); - assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", + assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", Long.toString(Math.round(floatNotInt.doubleValue()))), sqle.getMessage()); - + sqle = expectThrows(SQLException.class, () -> jps.setObject(1, floatNotInt, Types.SMALLINT)); - assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", + assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", Long.toString(Math.round(floatNotInt.doubleValue()))), sqle.getMessage()); } - + public void testSettingDoubleValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + double someDouble = randomDouble(); jps.setDouble(1, someDouble); assertEquals(someDouble, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); - + jps.setObject(1, someDouble); assertEquals(someDouble, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); assertTrue(value(jps) instanceof Double); - + jps.setObject(1, someDouble, Types.VARCHAR); assertEquals(String.valueOf(someDouble), value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, someDouble, Types.REAL); - assertEquals(new Float(someDouble), value(jps)); + assertEquals((float) someDouble, value(jps)); assertEquals(REAL, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingDoubleValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); double someDouble = randomDouble(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someDouble, Types.TIMESTAMP)); - assertEquals("Conversion from type [DOUBLE] to [Timestamp] not supported", sqle.getMessage()); - + assertEquals( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [DOUBLE] to a Timestamp", someDouble), sqle.getMessage()); + Double doubleNotInt = 5_155_000_000d; sqle = expectThrows(SQLException.class, () -> jps.setObject(1, doubleNotInt, Types.INTEGER)); - assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", + assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", Long.toString(((Number) doubleNotInt).longValue())), sqle.getMessage()); } - + public void testUnsupportedClasses() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); 
SQLFeatureNotSupportedException sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, new Struct() { @@ -341,218 +343,214 @@ public Object[] getAttributes() throws SQLException { } })); assertEquals("Objects of type java.sql.Struct are not supported", sfnse.getMessage()); - + sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, new URL("http://test"))); assertEquals("Objects of type java.net.URL are not supported", sfnse.getMessage()); - + sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setURL(1, new URL("http://test"))); assertEquals("Objects of type java.net.URL are not supported", sfnse.getMessage()); - + sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, this, Types.TIMESTAMP)); assertEquals("Conversion from type " + this.getClass().getName() + " to TIMESTAMP not supported", sfnse.getMessage()); - + SQLException se = expectThrows(SQLException.class, () -> jps.setObject(1, this, 1_000_000)); assertEquals("Type:1000000 is not a valid Types.java value.", se.getMessage()); - + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> jps.setObject(1, randomShort(), Types.CHAR)); assertEquals("Unsupported JDBC type [CHAR]", iae.getMessage()); } - + public void testSettingTimestampValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - Timestamp someTimestamp = new Timestamp(randomMillisSinceEpoch()); + Timestamp someTimestamp = new Timestamp(randomLong()); jps.setTimestamp(1, someTimestamp); assertEquals(someTimestamp.getTime(), ((Date)value(jps)).getTime()); assertEquals(TIMESTAMP, jdbcType(jps)); - + Calendar nonDefaultCal = randomCalendar(); // February 29th, 2016. 01:17:55 GMT = 1456708675000 millis since epoch jps.setTimestamp(1, new Timestamp(1456708675000L), nonDefaultCal); assertEquals(1456708675000L, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); assertEquals(TIMESTAMP, jdbcType(jps)); - - long beforeEpochTime = -randomMillisSinceEpoch(); + + long beforeEpochTime = randomLongBetween(Long.MIN_VALUE, 0); jps.setTimestamp(1, new Timestamp(beforeEpochTime), nonDefaultCal); assertEquals(beforeEpochTime, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); assertTrue(value(jps) instanceof java.util.Date); - + jps.setObject(1, someTimestamp, Types.VARCHAR); assertEquals(someTimestamp.toString(), value(jps).toString()); assertEquals(VARCHAR, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingTimestampValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - Timestamp someTimestamp = new Timestamp(randomMillisSinceEpoch()); - + Timestamp someTimestamp = new Timestamp(randomLong()); + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someTimestamp, Types.INTEGER)); assertEquals("Conversion from type java.sql.Timestamp to INTEGER not supported", sqle.getMessage()); } public void testSettingTimeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + Time time = new Time(4675000); Calendar nonDefaultCal = randomCalendar(); jps.setTime(1, time, nonDefaultCal); assertEquals(4675000, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); assertEquals(TIMESTAMP, jdbcType(jps)); assertTrue(value(jps) instanceof java.util.Date); - + jps.setObject(1, time, Types.VARCHAR); assertEquals(time.toString(), value(jps).toString()); assertEquals(VARCHAR, jdbcType(jps)); } - + 
public void testThrownExceptionsWhenSettingTimeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); Time time = new Time(4675000); - + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, time, Types.INTEGER)); assertEquals("Conversion from type java.sql.Time to INTEGER not supported", sqle.getMessage()); } - + public void testSettingSqlDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - - java.sql.Date someSqlDate = new java.sql.Date(randomMillisSinceEpoch()); + + java.sql.Date someSqlDate = new java.sql.Date(randomLong()); jps.setDate(1, someSqlDate); assertEquals(someSqlDate.getTime(), ((Date)value(jps)).getTime()); assertEquals(TIMESTAMP, jdbcType(jps)); - - someSqlDate = new java.sql.Date(randomMillisSinceEpoch()); + + someSqlDate = new java.sql.Date(randomLong()); Calendar nonDefaultCal = randomCalendar(); jps.setDate(1, someSqlDate, nonDefaultCal); assertEquals(someSqlDate.getTime(), convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); assertEquals(TIMESTAMP, jdbcType(jps)); assertTrue(value(jps) instanceof java.util.Date); - + jps.setObject(1, someSqlDate, Types.VARCHAR); assertEquals(someSqlDate.toString(), value(jps).toString()); assertEquals(VARCHAR, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingSqlDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - java.sql.Date someSqlDate = new java.sql.Date(randomMillisSinceEpoch()); - - SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, - () -> jps.setObject(1, new java.sql.Date(randomMillisSinceEpoch()), Types.DOUBLE)); + java.sql.Date someSqlDate = new java.sql.Date(randomLong()); + + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, + () -> jps.setObject(1, new java.sql.Date(randomLong()), Types.DOUBLE)); assertEquals("Conversion from type " + someSqlDate.getClass().getName() + " to DOUBLE not supported", sqle.getMessage()); } - + public void testSettingCalendarValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); Calendar someCalendar = randomCalendar(); - someCalendar.setTimeInMillis(randomMillisSinceEpoch()); - + someCalendar.setTimeInMillis(randomLong()); + jps.setObject(1, someCalendar); assertEquals(someCalendar.getTime(), (Date) value(jps)); assertEquals(TIMESTAMP, jdbcType(jps)); assertTrue(value(jps) instanceof java.util.Date); - + jps.setObject(1, someCalendar, Types.VARCHAR); assertEquals(someCalendar.toString(), value(jps).toString()); assertEquals(VARCHAR, jdbcType(jps)); - + Calendar nonDefaultCal = randomCalendar(); jps.setObject(1, nonDefaultCal); assertEquals(nonDefaultCal.getTime(), (Date) value(jps)); assertEquals(TIMESTAMP, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingCalendarValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); Calendar someCalendar = randomCalendar(); - + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someCalendar, Types.DOUBLE)); assertEquals("Conversion from type " + someCalendar.getClass().getName() + " to DOUBLE not supported", sqle.getMessage()); } - + public void testSettingDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - Date someDate = new Date(randomMillisSinceEpoch()); - + Date someDate = new Date(randomLong()); + jps.setObject(1, someDate); assertEquals(someDate, (Date) 
value(jps)); assertEquals(TIMESTAMP, jdbcType(jps)); assertTrue(value(jps) instanceof java.util.Date); - + jps.setObject(1, someDate, Types.VARCHAR); assertEquals(someDate.toString(), value(jps).toString()); assertEquals(VARCHAR, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - Date someDate = new Date(randomMillisSinceEpoch()); - + Date someDate = new Date(randomLong()); + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someDate, Types.BIGINT)); assertEquals("Conversion from type " + someDate.getClass().getName() + " to BIGINT not supported", sqle.getMessage()); } - + public void testSettingLocalDateTimeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); LocalDateTime ldt = LocalDateTime.now(Clock.systemDefaultZone()); - + jps.setObject(1, ldt); assertEquals(Date.class, value(jps).getClass()); assertEquals(TIMESTAMP, jdbcType(jps)); assertTrue(value(jps) instanceof java.util.Date); - + jps.setObject(1, ldt, Types.VARCHAR); assertEquals(ldt.toString(), value(jps).toString()); assertEquals(VARCHAR, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingLocalDateTimeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); LocalDateTime ldt = LocalDateTime.now(Clock.systemDefaultZone()); - + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, ldt, Types.BIGINT)); assertEquals("Conversion from type " + ldt.getClass().getName() + " to BIGINT not supported", sqle.getMessage()); } - + public void testSettingByteArrayValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + byte[] buffer = "some data".getBytes(StandardCharsets.UTF_8); jps.setBytes(1, buffer); assertEquals(byte[].class, value(jps).getClass()); assertEquals(VARBINARY, jdbcType(jps)); - + jps.setObject(1, buffer); assertEquals(byte[].class, value(jps).getClass()); assertEquals(VARBINARY, jdbcType(jps)); assertTrue(value(jps) instanceof byte[]); - + jps.setObject(1, buffer, Types.VARBINARY); assertEquals((byte[]) value(jps), buffer); assertEquals(VARBINARY, jdbcType(jps)); - + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.VARCHAR)); assertEquals("Conversion from type byte[] to VARCHAR not supported", sqle.getMessage()); - + sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.DOUBLE)); assertEquals("Conversion from type byte[] to DOUBLE not supported", sqle.getMessage()); } - + public void testThrownExceptionsWhenSettingByteArrayValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); byte[] buffer = "foo".getBytes(StandardCharsets.UTF_8); - + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.VARCHAR)); assertEquals("Conversion from type byte[] to VARCHAR not supported", sqle.getMessage()); - + sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.DOUBLE)); assertEquals("Conversion from type byte[] to DOUBLE not supported", sqle.getMessage()); } - private long randomMillisSinceEpoch() { - return randomLongBetween(0, System.currentTimeMillis()); - } - private JdbcPreparedStatement createJdbcPreparedStatement() throws SQLException { return new JdbcPreparedStatement(null, 
JdbcConfiguration.create("jdbc:es://l:1", null, 0), "?"); } @@ -564,14 +562,14 @@ private JDBCType jdbcType(JdbcPreparedStatement jps) throws SQLException { private Object value(JdbcPreparedStatement jps) throws SQLException { return jps.query.getParam(1).value; } - + private Calendar randomCalendar() { return Calendar.getInstance(randomTimeZone(), Locale.ROOT); } - + /* * Converts from UTC to the provided Calendar. - * Helps checking if the converted date/time values using Calendars in set*(...,Calendar) methods did convert + * Helps checking if the converted date/time values using Calendars in set*(...,Calendar) methods did convert * the values correctly to UTC. */ private long convertFromUTCtoCalendar(Date date, Calendar nonDefaultCal) throws SQLException { diff --git a/x-pack/plugin/sql/sql-action/build.gradle b/x-pack/plugin/sql/sql-action/build.gradle index bf79fd824ef8d..9e53c36bbf600 100644 --- a/x-pack/plugin/sql/sql-action/build.gradle +++ b/x-pack/plugin/sql/sql-action/build.gradle @@ -2,9 +2,6 @@ /* * This project contains transport-level requests and responses that are shared between x-pack plugin and qa tests */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' description = 'Request and response objects shared by the cli, jdbc ' + @@ -34,7 +31,7 @@ dependencies { forbiddenApisMain { //sql does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } dependencyLicenses { @@ -141,14 +138,4 @@ thirdPartyAudit.excludes = [ 'org.zeromq.ZMQ$Context', 'org.zeromq.ZMQ$Socket', 'org.zeromq.ZMQ' -] - -if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { - // Used by Log4J 2.11.1 - thirdPartyAudit.excludes += [ - 'java.io.ObjectInputFilter', - 'java.io.ObjectInputFilter$Config', - 'java.io.ObjectInputFilter$FilterInfo', - 'java.io.ObjectInputFilter$Status' - ] -} \ No newline at end of file +] \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/build.gradle b/x-pack/plugin/sql/sql-cli/build.gradle index b90b07abad3d1..0b2559c6a84aa 100644 --- a/x-pack/plugin/sql/sql-cli/build.gradle +++ b/x-pack/plugin/sql/sql-cli/build.gradle @@ -1,3 +1,4 @@ +import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask /* * This project is named sql-cli because it is in the "org.elasticsearch.plugin" @@ -74,11 +75,8 @@ artifacts { } -forbiddenApisMain { - signaturesURLs += file('src/forbidden/cli-signatures.txt').toURI().toURL() -} -forbiddenApisTest { - signaturesURLs += file('src/forbidden/cli-signatures.txt').toURI().toURL() +tasks.withType(ForbiddenApisCliTask) { + signaturesFiles += files('src/forbidden/cli-signatures.txt') } thirdPartyAudit.excludes = [ diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java index 357a4bcb5a770..6431f10a49217 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java @@ -27,6 +27,7 @@ import org.jline.terminal.TerminalBuilder; import java.io.IOException; import java.net.ConnectException; +import java.sql.SQLInvalidAuthorizationSpecException; import java.util.Arrays; import java.util.List; import java.util.logging.LogManager; @@ -139,6 +140,10 @@ private void checkConnection(CliSession cliSession, CliTerminal cliTerminal, Con // 
Most likely Elasticsearch is not running throw new UserException(ExitCodes.IO_ERROR, "Cannot connect to the server " + con.connectionString() + " - " + ex.getCause().getMessage()); + } else if (ex.getCause() != null && ex.getCause() instanceof SQLInvalidAuthorizationSpecException) { + throw new UserException(ExitCodes.NOPERM, + "Cannot establish a secure connection to the server " + + con.connectionString() + " - " + ex.getCause().getMessage()); } else { // Most likely we connected to something other than Elasticsearch throw new UserException(ExitCodes.DATA_ERROR, diff --git a/x-pack/plugin/sql/sql-client/build.gradle b/x-pack/plugin/sql/sql-client/build.gradle index fbc411e44596d..c4ee030d4568f 100644 --- a/x-pack/plugin/sql/sql-client/build.gradle +++ b/x-pack/plugin/sql/sql-client/build.gradle @@ -26,7 +26,7 @@ dependencyLicenses { forbiddenApisMain { // does not depend on core, so only jdk and http signatures should be checked - signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } forbiddenApisTest { diff --git a/x-pack/plugin/sql/sql-proto/build.gradle b/x-pack/plugin/sql/sql-proto/build.gradle index 7f26176e3c7a7..7d28336bfc51f 100644 --- a/x-pack/plugin/sql/sql-proto/build.gradle +++ b/x-pack/plugin/sql/sql-proto/build.gradle @@ -2,9 +2,6 @@ /* * This project contains XContent protocol classes shared between server and http client */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' description = 'Request and response objects shared by the cli, jdbc ' + @@ -25,7 +22,7 @@ dependencies { forbiddenApisMain { //sql does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } dependencyLicenses { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java index 10586c991b1ac..b11542d40ed53 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java @@ -19,6 +19,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.xpack.sql.type.EsField; @@ -300,7 +301,7 @@ public void resolveAsSeparateMappings(String indexWildcard, String javaRegex, Ac private static GetIndexRequest createGetIndexRequest(String index) { return new GetIndexRequest() .local(true) - .indices(index) + .indices(Strings.commaDelimitedListToStringArray(index)) .features(Feature.MAPPINGS) //lenient because we throw our own errors looking at the response e.g. 
if something was not resolved //also because this way security doesn't throw authorization exceptions but rather honours ignore_unavailable diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index 055e34758cc75..d0bff77a6485d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; @@ -92,7 +93,7 @@ public void query(Schema schema, QueryContainer query, String index, ActionListe log.trace("About to execute query {} on {}", StringUtils.toString(sourceBuilder), index); } - SearchRequest search = prepareRequest(client, sourceBuilder, timeout, index); + SearchRequest search = prepareRequest(client, sourceBuilder, timeout, Strings.commaDelimitedListToStringArray(index)); ActionListener l; if (query.isAggsOnly()) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java index 8c79ae1ef0595..f09f543c6ffae 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java @@ -21,23 +21,9 @@ public TableIdentifier visitTableIdentifier(TableIdentifierContext ctx) { ParseTree tree = ctx.name != null ? ctx.name : ctx.TABLE_IDENTIFIER(); String index = tree.getText(); - validateIndex(index, source); return new TableIdentifier(source, visitIdentifier(ctx.catalog), index); } - // see https://github.com/elastic/elasticsearch/issues/6736 - static void validateIndex(String index, Location source) { - for (int i = 0; i < index.length(); i++) { - char c = index.charAt(i); - if (Character.isUpperCase(c)) { - throw new ParsingException(source, "Invalid index name (needs to be lowercase) {}", index); - } - if (c == '\\' || c == '/' || c == '<' || c == '>' || c == '|' || c == ',' || c == ' ') { - throw new ParsingException(source, "Invalid index name (illegal character {}) {}", c, index); - } - } - } - @Override public String visitIdentifier(IdentifierContext ctx) { return ctx == null ? 
null : ctx.getText(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java index 5fb8a754f0f54..b8faedec71878 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java @@ -112,6 +112,9 @@ public Aggs addGroups(Collection groups) { } public Aggs addAgg(LeafAgg agg) { + if (metricAggs.contains(agg)) { + return this; + } return new Aggs(groups, combine(metricAggs, agg), pipelineAggs); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnKey.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnKey.java index e98770318d218..931eaee646478 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnKey.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnKey.java @@ -25,7 +25,8 @@ public GroupByColumnKey(String id, String fieldName, Direction direction) { public TermsValuesSourceBuilder asValueSource() { return new TermsValuesSourceBuilder(id()) .field(fieldName()) - .order(direction().asOrder()); + .order(direction().asOrder()) + .missingBucket(true); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateKey.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateKey.java index 43c80e75057e9..61c00c706eeff 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateKey.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateKey.java @@ -44,7 +44,8 @@ public DateHistogramValuesSourceBuilder asValueSource() { return new DateHistogramValuesSourceBuilder(id()) .field(fieldName()) .dateHistogramInterval(new DateHistogramInterval(interval)) - .timeZone(DateTimeZone.forTimeZone(timeZone)); + .timeZone(DateTimeZone.forTimeZone(timeZone)) + .missingBucket(true); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByScriptKey.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByScriptKey.java index a4af765d034bf..ccd2bf934ab61 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByScriptKey.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByScriptKey.java @@ -36,7 +36,8 @@ public ScriptTemplate script() { public TermsValuesSourceBuilder asValueSource() { TermsValuesSourceBuilder builder = new TermsValuesSourceBuilder(id()) .script(script.toPainless()) - .order(direction().asOrder()); + .order(direction().asOrder()) + .missingBucket(true); if (script.outputType().isNumeric()) { builder.valueType(ValueType.NUMBER); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionProcessorTests.java index a4d9d4cb57ab6..d3336ec89a840 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionProcessorTests.java +++ 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionProcessorTests.java @@ -75,17 +75,27 @@ public void testLCase() { stringCharInputValidation(proc); } - public void testLCaseWithTRLocale() { + public void testLCaseWithAZTRLocale() { + Locale initialLocale = Locale.getDefault(); Locale.setDefault(Locale.forLanguageTag("tr")); - StringProcessor proc = new StringProcessor(StringOperation.LCASE); - // ES-SQL is not locale sensitive (so far). The obvious test for this is the Turkish language, uppercase letter I conversion - // in non-Turkish locale the lowercasing would create i and an additional dot, while in Turkish Locale it would only create "i" - // unicode 0069 = i - assertEquals("\u0069\u0307", proc.process("\u0130")); - // unicode 0049 = I (regular capital letter i) - // in Turkish locale this would be lowercased to a "i" without dot (unicode 0131) - assertEquals("\u0069", proc.process("\u0049")); + try { + StringProcessor proc = new StringProcessor(StringOperation.LCASE); + // ES-SQL is not locale sensitive (so far). The obvious test for this is the Turkish language, uppercase letter I conversion + // in non-Turkish locale the lowercasing would create i and an additional dot, while in Turkish Locale it would only create "i" + // unicode 0069 = i + assertEquals("\u0069\u0307", proc.process("\u0130")); + // unicode 0049 = I (regular capital letter i) + // in Turkish locale this would be lowercased to a "i" without dot (unicode 0131) + assertEquals("\u0069", proc.process("\u0049")); + + Locale.setDefault(Locale.forLanguageTag("az")); + assertEquals("\u0069\u0307", proc.process("\u0130")); + assertEquals("\u0069", proc.process("\u0049")); + } finally { + // restore the original Locale + Locale.setDefault(initialLocale); + } } public void testUCase() { @@ -102,13 +112,22 @@ public void testUCase() { stringCharInputValidation(proc); } - public void testUCaseWithTRLocale() { + public void testUCaseWithAZTRLocale() { + Locale initialLocale = Locale.getDefault(); Locale.setDefault(Locale.forLanguageTag("tr")); - StringProcessor proc = new StringProcessor(StringOperation.UCASE); - - // ES-SQL is not Locale sensitive (so far). - // in Turkish locale, small letter "i" is uppercased to "I" with a dot above (unicode 130), otherwise in "i" (unicode 49) - assertEquals("\u0049", proc.process("\u0069")); + + try { + StringProcessor proc = new StringProcessor(StringOperation.UCASE); + // ES-SQL is not Locale sensitive (so far). 
+ // in Turkish locale, small letter "i" is uppercased to "I" with a dot above (unicode 130), otherwise in "i" (unicode 49) + assertEquals("\u0049", proc.process("\u0069")); + + Locale.setDefault(Locale.forLanguageTag("az")); + assertEquals("\u0049", proc.process("\u0069")); + } finally { + // restore the original Locale + Locale.setDefault(initialLocale); + } } public void testLength() { @@ -179,7 +198,7 @@ public void testCharLength() { assertEquals(7, proc.process("foo bar")); assertEquals(0, proc.process("")); assertEquals(1, proc.process('f')); - assertEquals(1, proc.process('€')); + assertEquals(1, proc.process('\u20ac')); // euro symbol stringCharInputValidation(proc); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilderTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilderTests.java deleted file mode 100644 index ec8b8abc51f2d..0000000000000 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilderTests.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.sql.parser; - -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.sql.tree.Location; - -import static org.hamcrest.Matchers.is; - -public class IdentifierBuilderTests extends ESTestCase { - - private static Location L = new Location(1, 10); - - public void testTypicalIndex() throws Exception { - IdentifierBuilder.validateIndex("some-index", L); - } - - public void testInternalIndex() throws Exception { - IdentifierBuilder.validateIndex(".some-internal-index-2020-02-02", L); - } - - public void testIndexPattern() throws Exception { - IdentifierBuilder.validateIndex(".some-*", L); - } - - public void testInvalidIndex() throws Exception { - ParsingException pe = expectThrows(ParsingException.class, () -> IdentifierBuilder.validateIndex("some,index", L)); - assertThat(pe.getMessage(), is("line 1:12: Invalid index name (illegal character ,) some,index")); - } - - public void testUpperCasedIndex() throws Exception { - ParsingException pe = expectThrows(ParsingException.class, () -> IdentifierBuilder.validateIndex("thisIsAnIndex", L)); - assertThat(pe.getMessage(), is("line 1:12: Invalid index name (needs to be lowercase) thisIsAnIndex")); - } -} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_roles.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_roles.json index c94333325b127..d945ebe3247e0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_roles.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_roles.json @@ -1,6 +1,6 @@ { "xpack.security.clear_cached_roles": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-roles.html#security-api-clear-role-cache", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-role-cache.html", "methods": [ "POST" ], "url": { "path": "/_xpack/security/role/{name}/_clear_cache", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role.json 
b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role.json index 4351b1bc847a1..881105d60b8b3 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role.json @@ -1,6 +1,6 @@ { "xpack.security.delete_role": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-roles.html#security-api-delete-role", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role.html", "methods": [ "DELETE" ], "url": { "path": "/_xpack/security/role/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json index 26c72666e8fa4..4c1df6b99db79 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json @@ -1,6 +1,6 @@ { "xpack.security.delete_role_mapping": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-role-mapping.html#security-api-delete-role-mapping", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role-mapping.html", "methods": [ "DELETE" ], "url": { "path": "/_xpack/security/role_mapping/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_user.json index d72c854a69dcb..fa1deb3e1ec13 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_user.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_user.json @@ -1,6 +1,6 @@ { "xpack.security.delete_user": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-delete-user", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html", "methods": [ "DELETE" ], "url": { "path": "/_xpack/security/user/{username}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.disable_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.disable_user.json index 3a72b3141911f..0e55e82ead628 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.disable_user.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.disable_user.json @@ -1,6 +1,6 @@ { "xpack.security.disable_user": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-disable-user", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html", "methods": [ "PUT", "POST" ], "url": { "path": "/_xpack/security/user/{username}/_disable", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.enable_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.enable_user.json index c68144957f07d..da2f67adbea37 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.enable_user.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.enable_user.json @@ -1,6 +1,6 @@ { 
"xpack.security.enable_user": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-enable-user", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user.html", "methods": [ "PUT", "POST" ], "url": { "path": "/_xpack/security/user/{username}/_enable", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role.json index 3479c911ccdce..67bdbb8a911a2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role.json @@ -1,6 +1,6 @@ { "xpack.security.get_role": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-roles.html#security-api-get-role", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role.html", "methods": [ "GET" ], "url": { "path": "/_xpack/security/role/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json index 0bdeb54cfb678..7696f6671e489 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json @@ -1,6 +1,6 @@ { "xpack.security.get_role_mapping": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-role-mapping.html#security-api-get-role-mapping", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html", "methods": [ "GET" ], "url": { "path": "/_xpack/security/role_mapping/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_token.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_token.json index 8020d1ecd6d97..0b6f141d10e6a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_token.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_token.json @@ -1,6 +1,6 @@ { "xpack.security.get_token": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-tokens.html#security-api-get-token", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-token.html", "methods": [ "POST" ], "url": { "path": "/_xpack/security/oauth2/token", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_user.json index 910fb7d064582..94dcbca81e18e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_user.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_user.json @@ -1,6 +1,6 @@ { "xpack.security.get_user": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-get-user", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html", "methods": [ "GET" ], "url": { "path": "/_xpack/security/user/{username}", diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.has_privileges.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.has_privileges.json index 64b15ae9c0222..9c75b40e4d1a7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.has_privileges.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.has_privileges.json @@ -1,6 +1,6 @@ { "xpack.security.has_privileges": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-privileges.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges.html", "methods": [ "GET", "POST" ], "url": { "path": "/_xpack/security/user/_has_privileges", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.invalidate_token.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.invalidate_token.json index be032c2ffd020..27dd103091422 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.invalidate_token.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.invalidate_token.json @@ -1,6 +1,6 @@ { "xpack.security.invalidate_token": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-tokens.html#security-api-invalidate-token", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html", "methods": [ "DELETE" ], "url": { "path": "/_xpack/security/oauth2/token", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privilege.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privilege.json deleted file mode 100644 index 3d453682c6431..0000000000000 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privilege.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "xpack.security.put_privilege": { - "documentation": "TODO", - "methods": [ "POST", "PUT" ], - "url": { - "path": "/_xpack/security/privilege/{application}/{name}", - "paths": [ "/_xpack/security/privilege/{application}/{name}" ], - "parts": { - "application": { - "type" : "string", - "description" : "Application name", - "required" : true - }, - "name": { - "type" : "string", - "description" : "Privilege name", - "required" : true - } - }, - "params": { - "refresh": { - "type" : "enum", - "options": ["true", "false", "wait_for"], - "description" : "If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes." 
- } - } - }, - "body": { - "description" : "The privilege to add", - "required" : true - } - } -} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privileges.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privileges.json index 07eb541715810..312db3c9a1821 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privileges.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privileges.json @@ -1,7 +1,7 @@ { "xpack.security.put_privileges": { "documentation": "TODO", - "methods": [ "POST" ], + "methods": [ "PUT", "POST" ], "url": { "path": "/_xpack/security/privilege/", "paths": [ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role.json index 4152975189e24..63ef5ee37867c 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role.json @@ -1,6 +1,6 @@ { "xpack.security.put_role": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-roles.html#security-api-put-role", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role.html", "methods": [ "PUT", "POST" ], "url": { "path": "/_xpack/security/role/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json index 3f92cd130bab4..98e723d80e9b0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json @@ -1,6 +1,6 @@ { "xpack.security.put_role_mapping": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-role-mapping.html#security-api-put-role-mapping", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html", "methods": [ "PUT", "POST" ], "url": { "path": "/_xpack/security/role_mapping/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_user.json index de07498a40954..1b51783a05ef5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_user.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_user.json @@ -1,6 +1,6 @@ { "xpack.security.put_user": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-put-user", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html", "methods": [ "PUT", "POST" ], "url": { "path": "/_xpack/security/user/{username}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/stats.yml index 94af4c345fda1..431629b1d23b8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/stats.yml @@ -19,11 +19,15 @@ index: bar body: leader_index: foo + - is_true: follow_index_created + - is_true: follow_index_shards_acked + - is_true: 
index_following_started # we can not reliably wait for replication to occur so we test the endpoint without indexing any documents - do: ccr.stats: index: bar + - match: { bar.0.leader_index: "foo" } - match: { bar.0.shard_id: 0 } - gte: { bar.0.leader_global_checkpoint: -1 } - gte: { bar.0.leader_max_seq_no: -1 } @@ -33,7 +37,7 @@ - gte: { bar.0.number_of_concurrent_reads: 0 } - match: { bar.0.number_of_concurrent_writes: 0 } - match: { bar.0.number_of_queued_writes: 0 } - - gte: { bar.0.index_metadata_version: 0 } + - gte: { bar.0.mapping_version: 0 } - gte: { bar.0.total_fetch_time_millis: 0 } - gte: { bar.0.number_of_successful_fetches: 0 } - gte: { bar.0.number_of_failed_fetches: 0 } @@ -43,6 +47,8 @@ - match: { bar.0.number_of_successful_bulk_operations: 0 } - match: { bar.0.number_of_failed_bulk_operations: 0 } - match: { bar.0.number_of_operations_indexed: 0 } + - length: { bar.0.fetch_exceptions: 0 } + - gte: { bar.0.time_since_last_fetch_millis: -1 } - do: ccr.unfollow_index: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/10_basic.yml index e8dddf2153576..30fa3a8d07840 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/10_basic.yml @@ -30,24 +30,26 @@ teardown: ignore: 404 --- "Test put and get privileges": - # Single privilege, with names in URL + # Single privilege - do: - xpack.security.put_privilege: - application: app - name: p1 + xpack.security.put_privileges: body: > { - "application": "app", - "name": "p1", - "actions": [ "data:read/*" , "action:login" ], - "metadata": { - "key1" : "val1a", - "key2" : "val2a" + "app": { + "p1": { + "application": "app", + "name": "p1", + "actions": [ "data:read/*" , "action:login" ], + "metadata": { + "key1" : "val1a", + "key2" : "val2a" + } + } } } - match: { "app.p1" : { created: true } } - # Multiple privileges, no names in URL + # Multiple privileges - do: xpack.security.put_privileges: body: > @@ -84,18 +86,18 @@ teardown: - match: { "app.p3" : { created: true } } - match: { "app2.p1" : { created: true } } - # Update existing privilege, with names in URL + # Update existing privilege - do: - xpack.security.put_privilege: - application: app - name: p1 + xpack.security.put_privileges: body: > { - "application": "app", - "name": "p1", - "actions": [ "data:read/*" , "action:login" ], - "metadata": { - "key3" : "val3" + "app": { + "p1": { + "actions": [ "data:read/*" , "action:login" ], + "metadata": { + "key3" : "val3" + } + } } } - match: { "app.p1" : { created: false } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml index f3fa8114ddbd0..759ddbad2b463 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml @@ -210,3 +210,4 @@ setup: job_state: "stopped" upgraded_doc_id: true + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml index 516be25be2a2d..23df0c5837700 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml @@ -128,6 +128,38 @@ setup: ] } +--- +"Test put_job in non-rollup index": + - do: + 
indices.create: + index: non-rollup + - do: + catch: /foo/ + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "non-rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + + --- "Try to include headers": diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml index d401d5c69bacb..e2f1174665ea6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml @@ -152,6 +152,20 @@ setup: - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" } - match: { aggregations.histo.buckets.3.doc_count: 20 } +--- +"Empty aggregation": + + - do: + xpack.rollup.rollup_search: + index: "foo_rollup" + body: + size: 0 + aggs: {} + + - length: { hits.hits: 0 } + - match: { hits.total: 0 } + - is_false: aggregations + --- "Search with Metric": diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml new file mode 100644 index 0000000000000..57bfd821ea24d --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml @@ -0,0 +1,343 @@ +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + + +--- +teardown: + - do: + xpack.security.delete_user: + username: "test_user" + ignore: 404 + + - do: + xpack.security.delete_role: + name: "foo_only_access" + ignore: 404 + +--- +"Index-based access": + + - do: + xpack.security.put_role: + name: "foo_only_access" + body: > + { + "cluster": [ "all" ], + "indices": [ + { "names": ["foo"], "privileges": ["all"] }, + { "names": ["rollup"], "privileges": ["all"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "foo_only_access" ], + "full_name" : "foo only" + } + + - do: + indices.create: + index: foo + body: + mappings: + _doc: + properties: + timestamp: + type: date + value_field: + type: integer + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: foo + type: _doc + body: + timestamp: 123 + value_field: 1232 + + - do: + indices.create: + index: foobar + body: + mappings: + _doc: + properties: + timestamp: + type: date + value_field: + type: integer + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + index: + index: foobar + type: _doc + body: + timestamp: 123 + value_field: 456 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.refresh: + index: foo + + # This index pattern will match both indices, but we only have permission to read one + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo*", + "rollup_index": "rollup", + "cron": "*/1 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1s" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + + - is_true: acknowledged + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.start_job: + id: foo + - is_true: started + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.refresh: + index: rollup + + # this is a hacky way to sleep for 5s, since we will never have 10 nodes + - do: + catch: request_timeout + cluster.health: + wait_for_nodes: 10 + timeout: "5s" + - match: + timed_out: true + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.get_jobs: + id: foo + - match: + jobs.0.stats.documents_processed: 1 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: foo + body: + query: + match_all: {} + + - match: + hits.total: 1 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: rollup + body: + query: + match_all: {} + + - match: + hits.total: 1 + - match: + hits.hits.0._id: "foo$VxMkzTqILshClbtbFi4-rQ" + - match: + hits.hits.0._source: + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram.timestamp: 0 + value_field.max.value: 1232.0 + _rollup.version: 2 + timestamp.date_histogram.interval: "1s" + value_field.sum.value: 1232.0 + value_field.min.value: 1232.0 + timestamp.date_histogram._count: 1 + _rollup.id: "foo" + + +--- +"Attribute-based access": + + - do: + xpack.security.put_role: + name: "foo_only_access" + body: > + { + "cluster": [ "all" ], + "indices": [ + { + "names": ["foo"], + "privileges": ["all"], + "query": { + "template": { + "source": "{\"bool\":{\"filter\":[{\"term\":{\"visibility\":\"public\"}}]}}" + } + } + }, + { "names": ["rollup"], "privileges": ["all"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "foo_only_access" ], + "full_name" : "foo only" + } + + - do: + indices.create: + index: foo + body: + mappings: + _doc: + properties: + timestamp: + type: date + value_field: + type: integer + visibility: + type: keyword + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: foo + type: _doc + body: + timestamp: 123 + value_field: 1232 + visibility: "public" + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + index: + index: foobar + type: _doc + body: + timestamp: 123 + value_field: 456 + visibility: "private" + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.refresh: + index: foo + + # Index contains two docs, but we should only be able to see one of them + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "rollup", + "cron": "*/1 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1s" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + - is_true: acknowledged + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.start_job: + id: foo + - is_true: started + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.refresh: + index: rollup + + # this is a hacky way to sleep for 5s, since we will never have 10 nodes + - do: + catch: request_timeout + cluster.health: + wait_for_nodes: 10 + timeout: "5s" + - match: + timed_out: true + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.get_jobs: + id: foo + - match: + jobs.0.stats.documents_processed: 1 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: foo + body: + query: + match_all: {} + + - match: + hits.total: 1 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: rollup + body: + query: + match_all: {} + + - match: + hits.total: 1 + - match: + hits.hits.0._id: "foo$VxMkzTqILshClbtbFi4-rQ" + - match: + hits.hits.0._source: + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram.timestamp: 0 + value_field.max.value: 1232.0 + _rollup.version: 2 + timestamp.date_histogram.interval: "1s" + value_field.sum.value: 1232.0 + value_field.min.value: 1232.0 + timestamp.date_histogram._count: 1 + _rollup.id: "foo" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/40_condtional_cluster_priv.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/40_condtional_cluster_priv.yml index b3a1e22069083..a7d3fabd2a282 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/40_condtional_cluster_priv.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/40_condtional_cluster_priv.yml @@ -31,21 +31,25 @@ setup: } - do: - xpack.security.put_privilege: - application: app-allow - name: read + xpack.security.put_privileges: body: > { - "actions": [ "data:read/*" ] + "app-allow": { + "read": { + "actions": [ "data:read/*" ] + } + } } - do: - xpack.security.put_privilege: - application: app_deny - name: read + xpack.security.put_privileges: body: > { - "actions": [ "data:read/*" ] + "app-deny": { + "read": { + "actions": [ "data:read/*" ] + } + } } --- @@ -82,12 +86,14 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.security.put_privilege: - application: app - name: read + xpack.security.put_privileges: body: > { - "actions": [ "data:read/*" ] + "app": { + "read": { + "actions": [ "data:read/*" ] + } + } } - match: { "app.read" : { created: true } } @@ -112,12 
+118,14 @@ teardown: "Test put application privileges when not allowed": - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.security.put_privilege: - application: app_deny - name: write + xpack.security.put_privileges: body: > { - "actions": [ "data:write/*" ] + "app_deny": { + "write": { + "actions": [ "data:write/*" ] + } + } } catch: forbidden diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml index a33fcdb529745..7a22ad322bfc2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml @@ -1,6 +1,8 @@ --- "Test watcher usage stats output": - + - skip: + version: "all" + reason: AwaitsFix at https://github.com/elastic/elasticsearch/issues/33326 - do: catch: missing xpack.watcher.delete_watch: diff --git a/x-pack/plugin/upgrade/build.gradle b/x-pack/plugin/upgrade/build.gradle index f95cde7134c56..56ce274dd1166 100644 --- a/x-pack/plugin/upgrade/build.gradle +++ b/x-pack/plugin/upgrade/build.gradle @@ -14,7 +14,8 @@ esplugin { archivesBaseName = 'x-pack-upgrade' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java index 07017e6fc0014..ad0ebd6815f2d 100644 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java @@ -79,7 +79,7 @@ private UpgradeActionRequired upgradeInfo(IndexMetaData indexMetaData, String in } } // Catch all check for all indices that didn't match the specific checks - if (indexMetaData.getCreationVersion().before(Version.V_5_0_0)) { + if (indexMetaData.getCreationVersion().before(Version.V_6_0_0)) { return UpgradeActionRequired.REINDEX; } else { return null; diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java index 82208f1f5ceb1..d7be0a389e30d 100644 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java @@ -6,8 +6,6 @@ package org.elasticsearch.xpack.upgrade; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; import org.elasticsearch.client.ParentTaskAssigningClient; @@ -116,10 +114,10 @@ private void removeReadOnlyBlock(ParentTaskAssigningClient parentAwareClient, St private void reindex(ParentTaskAssigningClient parentAwareClient, String index, String newIndex, ActionListener listener) { - SearchRequest sourceRequest = new 
SearchRequest(index); - sourceRequest.types(types); - IndexRequest destinationRequest = new IndexRequest(newIndex); - ReindexRequest reindexRequest = new ReindexRequest(sourceRequest, destinationRequest); + ReindexRequest reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices(index); + reindexRequest.setSourceDocTypes(types); + reindexRequest.setDestIndex(newIndex); reindexRequest.setRefresh(true); reindexRequest.setScript(transformScript); parentAwareClient.execute(ReindexAction.INSTANCE, reindexRequest, listener); diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java index 568397e37395a..e454ac4a0140b 100644 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java @@ -44,7 +44,7 @@ public class Upgrade extends Plugin implements ActionPlugin { - public static final Version UPGRADE_INTRODUCED = Version.V_5_6_0; + public static final Version UPGRADE_INTRODUCED = Version.CURRENT.minimumCompatibilityVersion(); private final Settings settings; private final List> upgradeCheckFactories; diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java index 5939777572b48..f980450c07f7c 100644 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java @@ -166,7 +166,7 @@ public static IndexMetaData newTestIndexMeta(String name, String alias, Settings .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_CREATION_DATE, 1) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_5_0_0_beta1) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_6_0_0) .put(indexSettings) .build(); IndexMetaData.Builder builder = IndexMetaData.builder(name).settings(build); diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java index cd83803d1884c..71e3348b058b6 100644 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java @@ -206,9 +206,9 @@ private ClusterState withRandomOldNode() { DiscoveryNode node = discoveryNodes.get(nodeId); DiscoveryNode newNode = new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), node.getHostAddress(), node.getAddress(), node.getAttributes(), node.getRoles(), - randomVersionBetween(random(), Version.V_5_0_0, Version.V_5_4_0)); + randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0)); return ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(discoveryNodes).remove(node).add(newNode)).build(); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle index a0feab6746359..3412cafc4f4ce 100644 --- a/x-pack/plugin/watcher/build.gradle +++ b/x-pack/plugin/watcher/build.gradle @@ -25,7 +25,8 @@ dependencyLicenses { dependencies { 
compileOnly "org.elasticsearch:elasticsearch:${version}" - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') compileOnly project(path: ':modules:transport-netty4', configuration: 'runtime') compileOnly project(path: ':plugins:transport-nio', configuration: 'runtime') @@ -67,7 +68,7 @@ thirdPartyAudit.excludes = [ ] // pulled in as external dependency to work on java 9 -if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { +if (project.runtimeJavaVersion <= JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += [ 'com.sun.activation.registries.MailcapParseException', 'javax.activation.ActivationDataFlavor', diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java index 620d575fc802c..279d768fde81a 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; @@ -22,13 +21,16 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.xpack.core.watcher.WatcherMetaData; import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils; import java.util.Collections; +import java.util.Comparator; import java.util.List; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -45,7 +47,7 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste Setting.boolSetting("xpack.watcher.require_manual_start", false, Property.NodeScope); private final AtomicReference state = new AtomicReference<>(WatcherState.STARTED); - private final AtomicReference> previousAllocationIds = new AtomicReference<>(Collections.emptyList()); + private final AtomicReference> previousShardRoutings = new AtomicReference<>(Collections.emptyList()); private final boolean requireManualStart; private volatile boolean shutDown = false; // indicates that the node has been shutdown and we should never start watcher after this. 
private volatile WatcherService watcherService; @@ -110,6 +112,7 @@ public void clusterChanged(ClusterChangedEvent event) { // if this is not a data node, we need to start it ourselves possibly if (event.state().nodes().getLocalNode().isDataNode() == false && isWatcherStoppedManually == false && this.state.get() == WatcherState.STOPPED) { + this.state.set(WatcherState.STARTING); watcherService.start(event.state(), () -> this.state.set(WatcherState.STARTED)); return; } @@ -144,15 +147,20 @@ public void clusterChanged(ClusterChangedEvent event) { return; } - List currentAllocationIds = localShards.stream() - .map(ShardRouting::allocationId) - .map(AllocationId::getId) - .sorted() + // also check if non local shards have changed, as loosing a shard on a + // remote node or adding a replica on a remote node needs to trigger a reload too + Set localShardIds = localShards.stream().map(ShardRouting::shardId).collect(Collectors.toSet()); + List allShards = event.state().routingTable().index(watchIndex).shardsWithState(STARTED); + allShards.addAll(event.state().routingTable().index(watchIndex).shardsWithState(RELOCATING)); + List localAffectedShardRoutings = allShards.stream() + .filter(shardRouting -> localShardIds.contains(shardRouting.shardId())) + // shardrouting is not comparable, so we need some order mechanism + .sorted(Comparator.comparing(ShardRouting::hashCode)) .collect(Collectors.toList()); - if (previousAllocationIds.get().equals(currentAllocationIds) == false) { + if (previousShardRoutings.get().equals(localAffectedShardRoutings) == false) { if (watcherService.validate(event.state())) { - previousAllocationIds.set(Collections.unmodifiableList(currentAllocationIds)); + previousShardRoutings.set(localAffectedShardRoutings); if (state.get() == WatcherState.STARTED) { watcherService.reload(event.state(), "new local watcher shard allocation ids"); } else if (state.get() == WatcherState.STOPPED) { @@ -187,13 +195,13 @@ private boolean isWatcherStoppedManually(ClusterState state) { * @return true, if existing allocation ids were cleaned out, false otherwise */ private boolean clearAllocationIds() { - List previousIds = previousAllocationIds.getAndSet(Collections.emptyList()); - return previousIds.equals(Collections.emptyList()) == false; + List previousIds = previousShardRoutings.getAndSet(Collections.emptyList()); + return previousIds.isEmpty() == false; } // for testing purposes only - List allocationIds() { - return previousAllocationIds.get(); + List shardRoutings() { + return previousShardRoutings.get(); } public WatcherState getState() { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index 49915674fe9e2..599287bb50a76 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -183,9 +183,6 @@ void reload(ClusterState state, String reason) { // by checking the cluster state version before and after loading the watches we can potentially just exit without applying the // changes processedClusterStateVersion.set(state.getVersion()); - triggerService.pauseExecution(); - int cancelledTaskCount = executionService.clearExecutionsAndQueue(); - logger.info("reloading watcher, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); executor.execute(wrapWatcherService(() -> reloadInner(state, reason, 
false), e -> logger.error("error reloading watcher", e))); @@ -221,6 +218,7 @@ private synchronized boolean reloadInner(ClusterState state, String reason, bool if (processedClusterStateVersion.get() != state.getVersion()) { logger.debug("watch service has not been reloaded for state [{}], another reload for state [{}] in progress", state.getVersion(), processedClusterStateVersion.get()); + return false; } Collection watches = loadWatches(state); @@ -231,7 +229,13 @@ private synchronized boolean reloadInner(ClusterState state, String reason, bool // if we had another state coming in the meantime, we will not start the trigger engines with these watches, but wait // until the others are loaded + // also this is the place where we pause the trigger service execution and clear the current execution service, so that we make sure + // that existing executions finish, but no new ones are executed if (processedClusterStateVersion.get() == state.getVersion()) { + triggerService.pauseExecution(); + int cancelledTaskCount = executionService.clearExecutionsAndQueue(); + logger.info("reloading watcher, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); + executionService.unPause(); triggerService.start(watches); if (triggeredWatches.isEmpty() == false) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java index ec309c69476cc..1e6285f71d781 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java @@ -13,20 +13,15 @@ public abstract class CronnableSchedule implements Schedule { - private static final Comparator CRON_COMPARATOR = new Comparator() { - @Override - public int compare(Cron c1, Cron c2) { - return c1.expression().compareTo(c2.expression()); - } - }; + private static final Comparator CRON_COMPARATOR = Comparator.comparing(Cron::expression); protected final Cron[] crons; - public CronnableSchedule(String... expressions) { + CronnableSchedule(String... expressions) { this(crons(expressions)); } - public CronnableSchedule(Cron... crons) { + private CronnableSchedule(Cron... crons) { assert crons.length > 0; this.crons = crons; Arrays.sort(crons, CRON_COMPARATOR); @@ -35,11 +30,14 @@ public CronnableSchedule(Cron... 
crons) { @Override public long nextScheduledTimeAfter(long startTime, long time) { assert time >= startTime; - long nextTime = Long.MAX_VALUE; - for (Cron cron : crons) { - nextTime = Math.min(nextTime, cron.getNextValidTimeAfter(time)); - } - return nextTime; + return Arrays.stream(crons) + .map(cron -> cron.getNextValidTimeAfter(time)) + // filter out expired dates before sorting + .filter(nextValidTime -> nextValidTime > -1) + .sorted() + .findFirst() + // no date in the future found, return -1 to the caller + .orElse(-1L); } public Cron[] crons() { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java index 05aa7cf302817..4c10f794880b9 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java @@ -56,7 +56,7 @@ public synchronized void start(Collection jobs) { schedules.put(job.id(), new ActiveSchedule(job.id(), trigger.getSchedule(), startTime)); } } - this.schedules.putAll(schedules); + this.schedules = schedules; } @Override diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java index 33b1217895dca..d22d402aa157e 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java @@ -61,14 +61,18 @@ void execute(Terminal terminal, String expression, int count) throws Exception { Cron cron = new Cron(expression); long time = date.getMillis(); + for (int i = 0; i < count; i++) { long prevTime = time; time = cron.getNextValidTimeAfter(time); if (time < 0) { - throw new UserException(ExitCodes.OK, (i + 1) + ".\t Could not compute future times since [" - + formatter.print(prevTime) + "] " + "(perhaps the cron expression only points to times in the past?)"); + if (i == 0) { + throw new UserException(ExitCodes.OK, "Could not compute future times since [" + + formatter.print(prevTime) + "] " + "(perhaps the cron expression only points to times in the past?)"); + } + break; } - terminal.println((i+1) + ".\t" + formatter.print(time)); + terminal.println((i + 1) + ".\t" + formatter.print(time)); } } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java index 700901753d4a1..384338af5a284 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java @@ -254,9 +254,12 @@ public void testReplicaWasAddedOrRemoved() { .add(newNode("node_2")) .build(); + ShardRouting firstShardOnSecondNode = TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED); + ShardRouting secondShardOnFirstNode = TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED); + IndexRoutingTable previousWatchRoutingTable = 
IndexRoutingTable.builder(watchIndex) - .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) - .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) + .addShard(secondShardOnFirstNode) + .addShard(firstShardOnSecondNode) .build(); IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX) @@ -273,10 +276,19 @@ public void testReplicaWasAddedOrRemoved() { .metaData(MetaData.builder().put(indexMetaData, false)) .build(); + // add a replica in the local node + boolean addShardOnLocalNode = randomBoolean(); + final ShardRouting addedShardRouting; + if (addShardOnLocalNode) { + addedShardRouting = TestShardRouting.newShardRouting(shardId, "node_1", false, STARTED); + } else { + addedShardRouting = TestShardRouting.newShardRouting(secondShardId, "node_2", false, STARTED); + } + IndexRoutingTable currentWatchRoutingTable = IndexRoutingTable.builder(watchIndex) - .addShard(TestShardRouting.newShardRouting(shardId, "node_1", false, STARTED)) - .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) - .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) + .addShard(secondShardOnFirstNode) + .addShard(firstShardOnSecondNode) + .addShard(addedShardRouting) .build(); ClusterState stateWithReplicaAdded = ClusterState.builder(new ClusterName("my-cluster")) @@ -477,7 +489,67 @@ public void testDataNodeWithoutDataCanStart() { assertThat(lifeCycleService.getState(), is(WatcherState.STARTED)); } - private ClusterState startWatcher() { + // this emulates a node outage somewhere in the cluster that carried a watcher shard + // the number of shards remains the same, but we need to ensure that watcher properly reloads + // previously we only checked the local shard allocations, but we also need to check if shards in the cluster have changed + public void testWatcherReloadsOnNodeOutageWithWatcherShard() { + Index watchIndex = new Index(Watch.INDEX, "foo"); + ShardId shardId = new ShardId(watchIndex, 0); + String localNodeId = randomFrom("node_1", "node_2"); + String outageNodeId = localNodeId.equals("node_1") ? 
"node_2" : "node_1"; + DiscoveryNodes previousDiscoveryNodes = new DiscoveryNodes.Builder().masterNodeId(localNodeId).localNodeId(localNodeId) + .add(newNode(localNodeId)) + .add(newNode(outageNodeId)) + .build(); + + ShardRouting replicaShardRouting = TestShardRouting.newShardRouting(shardId, localNodeId, false, STARTED); + ShardRouting primartShardRouting = TestShardRouting.newShardRouting(shardId, outageNodeId, true, STARTED); + IndexRoutingTable previousWatchRoutingTable = IndexRoutingTable.builder(watchIndex) + .addShard(replicaShardRouting) + .addShard(primartShardRouting) + .build(); + + IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX) + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6) + ).build(); + + ClusterState previousState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(previousDiscoveryNodes) + .routingTable(RoutingTable.builder().add(previousWatchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); + + ShardRouting nowPrimaryShardRouting = replicaShardRouting.moveActiveReplicaToPrimary(); + IndexRoutingTable currentWatchRoutingTable = IndexRoutingTable.builder(watchIndex) + .addShard(nowPrimaryShardRouting) + .build(); + + DiscoveryNodes currentDiscoveryNodes = new DiscoveryNodes.Builder().masterNodeId(localNodeId).localNodeId(localNodeId) + .add(newNode(localNodeId)) + .build(); + + ClusterState currentState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(currentDiscoveryNodes) + .routingTable(RoutingTable.builder().add(currentWatchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); + + // initialize the previous state, so all the allocation ids are loaded + when(watcherService.validate(anyObject())).thenReturn(true); + lifeCycleService.clusterChanged(new ClusterChangedEvent("whatever", previousState, currentState)); + + reset(watcherService); + when(watcherService.validate(anyObject())).thenReturn(true); + ClusterChangedEvent event = new ClusterChangedEvent("whatever", currentState, previousState); + lifeCycleService.clusterChanged(event); + verify(watcherService).reload(eq(event.state()), anyString()); + } + + private void startWatcher() { Index index = new Index(Watch.INDEX, "uuid"); IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); indexRoutingTableBuilder.addShard( @@ -506,12 +578,10 @@ private ClusterState startWatcher() { lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", state, emptyState)); assertThat(lifeCycleService.getState(), is(WatcherState.STARTED)); verify(watcherService, times(1)).reload(eq(state), anyString()); - assertThat(lifeCycleService.allocationIds(), hasSize(1)); + assertThat(lifeCycleService.shardRoutings(), hasSize(1)); // reset the mock, the user has to mock everything themselves again reset(watcherService); - - return state; } private List randomIndexPatterns() { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java index 7331be4553bbe..7c3abd1d808e9 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java +++ 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java @@ -33,7 +33,7 @@ public void testOpEvalEQ() throws Exception { assertThat(CompareCondition.Op.EQ.eval(null, null), is(true)); assertThat(CompareCondition.Op.EQ.eval(4, 3.0), is(false)); assertThat(CompareCondition.Op.EQ.eval(3, 3.0), is(true)); - assertThat(CompareCondition.Op.EQ.eval(2, new Float(3.0)), is(false)); + assertThat(CompareCondition.Op.EQ.eval(2, Float.valueOf((float)3.0)), is(false)); assertThat(CompareCondition.Op.EQ.eval(3, null), is(false)); assertThat(CompareCondition.Op.EQ.eval(2, "2"), is(true)); // comparing as strings assertThat(CompareCondition.Op.EQ.eval(3, "4"), is(false)); // comparing as strings @@ -59,7 +59,7 @@ public void testOpEvalNotEQ() throws Exception { assertThat(CompareCondition.Op.NOT_EQ.eval(null, null), is(false)); assertThat(CompareCondition.Op.NOT_EQ.eval(4, 3.0), is(true)); assertThat(CompareCondition.Op.NOT_EQ.eval(3, 3.0), is(false)); - assertThat(CompareCondition.Op.NOT_EQ.eval(2, new Float(3.0)), is(true)); + assertThat(CompareCondition.Op.NOT_EQ.eval(2, Float.valueOf((float)3.0)), is(true)); assertThat(CompareCondition.Op.NOT_EQ.eval(3, null), is(true)); assertThat(CompareCondition.Op.NOT_EQ.eval(2, "2"), is(false)); // comparing as strings assertThat(CompareCondition.Op.NOT_EQ.eval(3, "4"), is(true)); // comparing as strings @@ -83,7 +83,7 @@ public void testOpEvalNotEQ() throws Exception { public void testOpEvalGTE() throws Exception { assertThat(CompareCondition.Op.GTE.eval(4, 3.0), is(true)); assertThat(CompareCondition.Op.GTE.eval(3, 3.0), is(true)); - assertThat(CompareCondition.Op.GTE.eval(2, new Float(3.0)), is(false)); + assertThat(CompareCondition.Op.GTE.eval(2, Float.valueOf((float)3.0)), is(false)); assertThat(CompareCondition.Op.GTE.eval(3, null), is(false)); assertThat(CompareCondition.Op.GTE.eval(3, "2"), is(true)); // comparing as strings assertThat(CompareCondition.Op.GTE.eval(3, "4"), is(false)); // comparing as strings @@ -103,7 +103,7 @@ public void testOpEvalGTE() throws Exception { public void testOpEvalGT() throws Exception { assertThat(CompareCondition.Op.GT.eval(4, 3.0), is(true)); assertThat(CompareCondition.Op.GT.eval(3, 3.0), is(false)); - assertThat(CompareCondition.Op.GT.eval(2, new Float(3.0)), is(false)); + assertThat(CompareCondition.Op.GT.eval(2, Float.valueOf((float)3.0)), is(false)); assertThat(CompareCondition.Op.GT.eval(3, null), is(false)); assertThat(CompareCondition.Op.GT.eval(3, "2"), is(true)); // comparing as strings assertThat(CompareCondition.Op.GT.eval(3, "4"), is(false)); // comparing as strings @@ -124,7 +124,7 @@ public void testOpEvalGT() throws Exception { public void testOpEvalLTE() throws Exception { assertThat(CompareCondition.Op.LTE.eval(4, 3.0), is(false)); assertThat(CompareCondition.Op.LTE.eval(3, 3.0), is(true)); - assertThat(CompareCondition.Op.LTE.eval(2, new Float(3.0)), is(true)); + assertThat(CompareCondition.Op.LTE.eval(2, Float.valueOf((float) 3.0)), is(true)); assertThat(CompareCondition.Op.LTE.eval(3, null), is(false)); assertThat(CompareCondition.Op.LTE.eval(3, "2"), is(false)); // comparing as strings assertThat(CompareCondition.Op.LTE.eval(3, "4"), is(true)); // comparing as strings @@ -144,7 +144,7 @@ public void testOpEvalLTE() throws Exception { public void testOpEvalLT() throws Exception { assertThat(CompareCondition.Op.LT.eval(4, 3.0), is(false)); assertThat(CompareCondition.Op.LT.eval(3, 3.0), is(false)); - assertThat(CompareCondition.Op.LT.eval(2, new 
Float(3.0)), is(true)); + assertThat(CompareCondition.Op.LT.eval(2, Float.valueOf((float) 3.0)), is(true)); assertThat(CompareCondition.Op.LT.eval(3, null), is(false)); assertThat(CompareCondition.Op.LT.eval(3, "2"), is(false)); // comparing as strings assertThat(CompareCondition.Op.LT.eval(3, "4"), is(true)); // comparing as strings diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java index bb593bcb67ae2..d3f46d3d452f7 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java @@ -33,7 +33,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationField; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.watcher.actions.Action; import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java index 1ade767410b53..6cbdf6e122656 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java @@ -11,11 +11,15 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; + import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.hasItemInArray; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; public class CronScheduleTests extends ScheduleTestCase { public void testInvalid() throws Exception { @@ -54,18 +58,25 @@ public void testParseMultiple() throws Exception { assertThat(crons, hasItemInArray("0 0/3 * * * ?")); } + public void testMultipleCronsNextScheduledAfter() { + CronSchedule schedule = new CronSchedule("0 5 9 1 1 ? 2019", "0 5 9 1 1 ? 2020", "0 5 9 1 1 ? 
2017"); + ZonedDateTime start2019 = ZonedDateTime.of(2019, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + ZonedDateTime start2020 = ZonedDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + long firstSchedule = schedule.nextScheduledTimeAfter(0, start2019.toInstant().toEpochMilli()); + long secondSchedule = schedule.nextScheduledTimeAfter(0, start2020.toInstant().toEpochMilli()); + + assertThat(firstSchedule, is(not(-1L))); + assertThat(secondSchedule, is(not(-1L))); + assertThat(firstSchedule, is(not(secondSchedule))); + } + public void testParseInvalidBadExpression() throws Exception { XContentBuilder builder = jsonBuilder().value("0 0/5 * * ?"); BytesReference bytes = BytesReference.bytes(builder); XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); parser.nextToken(); - try { - new CronSchedule.Parser().parse(parser); - fail("expected cron parsing to fail when using invalid cron expression"); - } catch (ElasticsearchParseException pe) { - // expected - assertThat(pe.getCause(), instanceOf(IllegalArgumentException.class)); - } + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> new CronSchedule.Parser().parse(parser)); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } public void testParseInvalidEmpty() throws Exception { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java index 7949998867b48..6680b38ab94b3 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java @@ -35,7 +35,9 @@ import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.daily; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.weekly; +import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; import static org.joda.time.DateTimeZone.UTC; import static org.mockito.Mockito.mock; @@ -50,8 +52,12 @@ public void init() throws Exception { } private TriggerEngine createEngine() { - return new TickerScheduleTriggerEngine(Settings.EMPTY, - mock(ScheduleRegistry.class), clock); + Settings settings = Settings.EMPTY; + // having a low value here speeds up the tests tremendously, we still want to run with the defaults every now and then + if (usually()) { + settings = Settings.builder().put(TickerScheduleTriggerEngine.TICKER_INTERVAL_SETTING.getKey(), "10ms").build(); + } + return new TickerScheduleTriggerEngine(settings, mock(ScheduleRegistry.class), clock); } private void advanceClockIfNeeded(DateTime newCurrentDateTime) { @@ -104,6 +110,40 @@ public void accept(Iterable events) { assertThat(bits.cardinality(), is(count)); } + public void testStartClearsExistingSchedules() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + List firedWatchIds = new ArrayList<>(); + engine.register(new Consumer>() { + @Override + public void accept(Iterable events) { + for (TriggerEvent event : events) { + firedWatchIds.add(event.jobName()); + } + latch.countDown(); + } + }); + + int count = randomIntBetween(2, 5); + List watches = new ArrayList<>(); 
+ for (int i = 0; i < count; i++) { + watches.add(createWatch(String.valueOf(i), interval("1s"))); + } + engine.start(watches); + + watches.clear(); + for (int i = 0; i < count; i++) { + watches.add(createWatch("another_id" + i, interval("1s"))); + } + engine.start(watches); + + advanceClockIfNeeded(new DateTime(clock.millis(), UTC).plusMillis(1100)); + if (!latch.await(3 * count, TimeUnit.SECONDS)) { + fail("waiting too long for all watches to be triggered"); + } + + assertThat(firedWatchIds, everyItem(startsWith("another_id"))); + } + public void testAddHourly() throws Exception { final String name = "job_name"; final CountDownLatch latch = new CountDownLatch(1); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java index 2238842494817..f1e864d547c83 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java @@ -8,6 +8,13 @@ import org.elasticsearch.cli.Command; import org.elasticsearch.cli.CommandTestCase; +import java.util.Calendar; +import java.util.Locale; +import java.util.TimeZone; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; + public class CronEvalToolTests extends CommandTestCase { @Override protected Command newCommand() { @@ -18,6 +25,27 @@ public void testParse() throws Exception { String countOption = randomBoolean() ? "-c" : "--count"; int count = randomIntBetween(1, 100); String output = execute(countOption, Integer.toString(count), "0 0 0 1-6 * ?"); - assertTrue(output, output.contains("Here are the next " + count + " times this cron expression will trigger")); + assertThat(output, containsString("Here are the next " + count + " times this cron expression will trigger")); + } + + public void testGetNextValidTimes() throws Exception { + final int year = Calendar.getInstance(TimeZone.getTimeZone("UTC"), Locale.ROOT).get(Calendar.YEAR) + 1; + { + String output = execute("0 3 23 8 9 ? " + year); + assertThat(output, containsString("Here are the next 10 times this cron expression will trigger:")); + assertThat(output, not(containsString("ERROR"))); + assertThat(output, not(containsString("2.\t"))); + } + { + String output = execute("0 3 23 */4 9 ? " + year); + assertThat(output, containsString("Here are the next 10 times this cron expression will trigger:")); + assertThat(output, not(containsString("ERROR"))); + } + { + Exception expectThrows = expectThrows(Exception.class, () -> execute("0 3 23 */4 9 ? 
2017")); + String message = expectThrows.getMessage(); + assertThat(message, containsString("Could not compute future times since")); + assertThat(message, containsString("(perhaps the cron expression only points to times in the past?)")); + } } } diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java index 3b9032f092185..1d3e51c11e027 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.protocol.xpack; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; @@ -412,8 +411,7 @@ public FeatureSet(String name, @Nullable String description, boolean available, } public FeatureSet(StreamInput in) throws IOException { - this(in.readString(), in.readOptionalString(), in.readBoolean(), in.readBoolean(), - in.getVersion().onOrAfter(Version.V_5_4_0) ? in.readMap() : null); + this(in.readString(), in.readOptionalString(), in.readBoolean(), in.readBoolean(), in.readMap()); } @Override @@ -422,9 +420,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(description); out.writeBoolean(available); out.writeBoolean(enabled); - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeMap(nativeCodeInfo); - } + out.writeMap(nativeCodeInfo); } public String name() { diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java new file mode 100644 index 0000000000000..455434f7ac4a9 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java @@ -0,0 +1,229 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.protocol.xpack.graph; + +import com.carrotsearch.hppc.ObjectIntHashMap; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent.Params; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * A Connection links exactly two {@link Vertex} objects. The basis of a + * connection is one or more documents have been found that contain + * this pair of terms and the strength of the connection is recorded + * as a weight. + */ +public class Connection { + private Vertex from; + private Vertex to; + private double weight; + private long docCount; + + public Connection(Vertex from, Vertex to, double weight, long docCount) { + this.from = from; + this.to = to; + this.weight = weight; + this.docCount = docCount; + } + + public Connection(StreamInput in, Map vertices) throws IOException { + from = vertices.get(new VertexId(in.readString(), in.readString())); + to = vertices.get(new VertexId(in.readString(), in.readString())); + weight = in.readDouble(); + docCount = in.readVLong(); + } + + Connection() { + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(from.getField()); + out.writeString(from.getTerm()); + out.writeString(to.getField()); + out.writeString(to.getTerm()); + out.writeDouble(weight); + out.writeVLong(docCount); + } + + public ConnectionId getId() { + return new ConnectionId(from.getId(), to.getId()); + } + + public Vertex getFrom() { + return from; + } + + public Vertex getTo() { + return to; + } + + /** + * @return a measure of the relative connectedness between a pair of {@link Vertex} objects + */ + public double getWeight() { + return weight; + } + + /** + * @return the number of documents in the sampled set that contained this + * pair of {@link Vertex} objects. 
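To make the relationship concrete, here is a minimal, hedged sketch of constructing the two types by hand; the field names, terms, weights and counts are invented for illustration, and the constructors are the ones added in this file and in Vertex.java later in this PR.

    // illustrative only; assumes org.elasticsearch.protocol.xpack.graph.Vertex and Connection are imported
    Vertex adminUrl = new Vertex("url", "admin", 1.0d, 0, 100L, 40L);        // field, term, weight, hop depth, bg count, fg count
    Vertex clientIp = new Vertex("ip", "10.0.0.1", 0.8d, 1, 25L, 9L);
    Connection seenTogether = new Connection(adminUrl, clientIp, 0.75d, 7L); // link strength and number of docs containing both terms
    assert seenTogether.getFrom() == adminUrl && seenTogether.getTo() == clientIp;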
+ */ + public long getDocCount() { + return docCount; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Connection other = (Connection) obj; + return docCount == other.docCount && + weight == other.weight && + Objects.equals(to, other.to) && + Objects.equals(from, other.from); + } + + @Override + public int hashCode() { + return Objects.hash(docCount, weight, from, to); + } + + + private static final ParseField SOURCE = new ParseField("source"); + private static final ParseField TARGET = new ParseField("target"); + private static final ParseField WEIGHT = new ParseField("weight"); + private static final ParseField DOC_COUNT = new ParseField("doc_count"); + + + void toXContent(XContentBuilder builder, Params params, ObjectIntHashMap vertexNumbers) throws IOException { + builder.field(SOURCE.getPreferredName(), vertexNumbers.get(from)); + builder.field(TARGET.getPreferredName(), vertexNumbers.get(to)); + builder.field(WEIGHT.getPreferredName(), weight); + builder.field(DOC_COUNT.getPreferredName(), docCount); + } + + //When deserializing from XContent we need to wait for all vertices to be loaded before + // Connection objects can be created that reference them. This class provides the interim + // state for connections. + static class UnresolvedConnection { + int fromIndex; + int toIndex; + double weight; + long docCount; + UnresolvedConnection(int fromIndex, int toIndex, double weight, long docCount) { + super(); + this.fromIndex = fromIndex; + this.toIndex = toIndex; + this.weight = weight; + this.docCount = docCount; + } + public Connection resolve(List vertices) { + return new Connection(vertices.get(fromIndex), vertices.get(toIndex), weight, docCount); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "ConnectionParser", true, + args -> { + int source = (Integer) args[0]; + int target = (Integer) args[1]; + double weight = (Double) args[2]; + long docCount = (Long) args[3]; + return new UnresolvedConnection(source, target, weight, docCount); + }); + + static { + PARSER.declareInt(constructorArg(), SOURCE); + PARSER.declareInt(constructorArg(), TARGET); + PARSER.declareDouble(constructorArg(), WEIGHT); + PARSER.declareLong(constructorArg(), DOC_COUNT); + } + static UnresolvedConnection fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + } + + + /** + * An identifier (implements hashcode and equals) that represents a + * unique key for a {@link Connection} + */ + public static class ConnectionId { + private final VertexId source; + private final VertexId target; + + public ConnectionId(VertexId source, VertexId target) { + this.source = source; + this.target = target; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + ConnectionId vertexId = (ConnectionId) o; + + if (source != null ? !source.equals(vertexId.source) : vertexId.source != null) + return false; + if (target != null ? !target.equals(vertexId.target) : vertexId.target != null) + return false; + + return true; + } + + @Override + public int hashCode() { + int result = source != null ? source.hashCode() : 0; + result = 31 * result + (target != null ? 
target.hashCode() : 0); + return result; + } + + public VertexId getSource() { + return source; + } + + public VertexId getTarget() { + return target; + } + + @Override + public String toString() { + return getSource() + "->" + getTarget(); + } + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java new file mode 100644 index 0000000000000..495ea5fd28ac3 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java @@ -0,0 +1,401 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; + +/** + * Holds the criteria required to guide the exploration of connected terms which + * can be returned as a graph. 
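Putting those pieces together, a request is built from an index pattern, optional execution controls and at least one hop. A minimal, hedged sketch using only the API introduced in this PR (the index, query and field names are illustrative; QueryBuilders and TimeValue are existing core classes assumed to be imported):

    GraphExploreRequest request = new GraphExploreRequest("weblogs");
    request.timeout(TimeValue.timeValueSeconds(30));                   // best-effort budget; partial results are returned on overrun
    Hop hop = request.createNextHop(QueryBuilders.termQuery("url", "admin"));
    hop.addVertexRequest("ip").size(10).minDocCount(5);                // top connected IPs, each seen in at least 5 sampled docs
    assert request.validate() == null;                                 // a request without hops or vertices would fail validation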
+ */ +public class GraphExploreRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContentObject { + + public static final String NO_HOPS_ERROR_MESSAGE = "Graph explore request must have at least one hop"; + public static final String NO_VERTICES_ERROR_MESSAGE = "Graph explore hop must have at least one VertexRequest"; + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false); + private String[] types = Strings.EMPTY_ARRAY; + private String routing; + private TimeValue timeout; + + private int sampleSize = SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE; + private String sampleDiversityField; + private int maxDocsPerDiversityValue; + private boolean useSignificance = true; + private boolean returnDetailedInfo; + + private List hops = new ArrayList<>(); + + public GraphExploreRequest() { + } + + /** + * Constructs a new graph request to run against the provided indices. No + * indices means it will run against all indices. + */ + public GraphExploreRequest(String... indices) { + this.indices = indices; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (hops.size() == 0) { + validationException = ValidateActions.addValidationError(NO_HOPS_ERROR_MESSAGE, validationException); + } + for (Hop hop : hops) { + validationException = hop.validate(validationException); + } + return validationException; + } + + @Override + public String[] indices() { + return this.indices; + } + + @Override + public GraphExploreRequest indices(String... indices) { + this.indices = indices; + return this; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public GraphExploreRequest indicesOptions(IndicesOptions indicesOptions) { + if (indicesOptions == null) { + throw new IllegalArgumentException("IndicesOptions must not be null"); + } + this.indicesOptions = indicesOptions; + return this; + } + + public String[] types() { + return this.types; + } + + public GraphExploreRequest types(String... types) { + this.types = types; + return this; + } + + public String routing() { + return this.routing; + } + + public GraphExploreRequest routing(String routing) { + this.routing = routing; + return this; + } + + public GraphExploreRequest routing(String... routings) { + this.routing = Strings.arrayToCommaDelimitedString(routings); + return this; + } + + public TimeValue timeout() { + return timeout; + } + + /** + * Graph exploration can be set to timeout after the given period. Search + * operations involved in each hop are limited to the remaining time + * available but can still overrun due to the nature of their "best efforts" + * timeout support. When a timeout occurs partial results are returned. 
+ * + * @param timeout + * a {@link TimeValue} object which determines the maximum length + * of time to spend exploring + */ + public GraphExploreRequest timeout(TimeValue timeout) { + if (timeout == null) { + throw new IllegalArgumentException("timeout must not be null"); + } + this.timeout = timeout; + return this; + } + + public GraphExploreRequest timeout(String timeout) { + timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout")); + return this; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + types = in.readStringArray(); + routing = in.readOptionalString(); + timeout = in.readOptionalTimeValue(); + sampleSize = in.readInt(); + sampleDiversityField = in.readOptionalString(); + maxDocsPerDiversityValue = in.readInt(); + + useSignificance = in.readBoolean(); + returnDetailedInfo = in.readBoolean(); + + int numHops = in.readInt(); + Hop parentHop = null; + for (int i = 0; i < numHops; i++) { + Hop hop = new Hop(parentHop); + hop.readFrom(in); + hops.add(hop); + parentHop = hop; + } + + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + out.writeStringArray(types); + out.writeOptionalString(routing); + out.writeOptionalTimeValue(timeout); + + out.writeInt(sampleSize); + out.writeOptionalString(sampleDiversityField); + out.writeInt(maxDocsPerDiversityValue); + + out.writeBoolean(useSignificance); + out.writeBoolean(returnDetailedInfo); + out.writeInt(hops.size()); + for (Iterator iterator = hops.iterator(); iterator.hasNext();) { + Hop hop = iterator.next(); + hop.writeTo(out); + } + } + + @Override + public String toString() { + return "graph explore [" + Arrays.toString(indices) + "][" + Arrays.toString(types) + "]"; + } + + /** + * The number of top-matching documents that are considered during each hop + * (default is {@link SamplerAggregationBuilder#DEFAULT_SHARD_SAMPLE_SIZE} + * Very small values (less than 50) may not provide sufficient + * weight-of-evidence to identify significant connections between terms. + *

    + * Very large values (many thousands) are not recommended with loosely + * defined queries (fuzzy queries or those with many OR clauses). This is + * because any useful signals in the best documents are diluted with + * irrelevant noise from low-quality matches. Performance is also typically + * better with smaller samples as there are less look-ups required for + * background frequencies of terms found in the documents + *
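Those trade-offs are surfaced as request-level knobs; a hedged sketch of a diversified sample (the field name and limits are made up):

    GraphExploreRequest request = new GraphExploreRequest("weblogs");
    request.sampleSize(500);                // per-shard sample of best-matching documents examined in each hop
    request.sampleDiversityField("host");   // optional single-valued field used to diversify that sample
    request.maxDocsPerDiversityValue(2);    // cap on sampled docs sharing the same host value
    request.useSignificance(true);          // rank candidate terms by significance rather than raw popularity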

    + * + * @param maxNumberOfDocsPerHop + * shard-level sample size in documents + */ + public void sampleSize(int maxNumberOfDocsPerHop) { + sampleSize = maxNumberOfDocsPerHop; + } + + public int sampleSize() { + return sampleSize; + } + + /** + * Optional choice of single-value field on which to diversify sampled + * search results + */ + public void sampleDiversityField(String name) { + sampleDiversityField = name; + } + + public String sampleDiversityField() { + return sampleDiversityField; + } + + /** + * Optional number of permitted docs with same value in sampled search + * results. Must also declare which field using sampleDiversityField + */ + public void maxDocsPerDiversityValue(int maxDocs) { + this.maxDocsPerDiversityValue = maxDocs; + } + + public int maxDocsPerDiversityValue() { + return maxDocsPerDiversityValue; + } + + /** + * Controls the choice of algorithm used to select interesting terms. The + * default value is true which means terms are selected based on + * significance (see the {@link SignificantTerms} aggregation) rather than + * popularity (using the {@link TermsAggregator}). + * + * @param value + * true if the significant_terms algorithm should be used. + */ + public void useSignificance(boolean value) { + this.useSignificance = value; + } + + public boolean useSignificance() { + return useSignificance; + } + + /** + * Return detailed information about vertex frequencies as part of JSON + * results - defaults to false + * + * @param value + * true if detailed information is required in JSON responses + */ + public void returnDetailedInfo(boolean value) { + this.returnDetailedInfo = value; + } + + public boolean returnDetailedInfo() { + return returnDetailedInfo; + } + + /** + * Add a stage in the graph exploration. Each hop represents a stage of + * querying elasticsearch to identify terms which can then be connnected to + * other terms in a subsequent hop. 
+ * + * @param guidingQuery + * optional choice of query which influences which documents are + * considered in this stage + * @return a {@link Hop} object that holds settings for a stage in the graph + * exploration + */ + public Hop createNextHop(QueryBuilder guidingQuery) { + Hop parent = null; + if (hops.size() > 0) { + parent = hops.get(hops.size() - 1); + } + Hop newHop = new Hop(parent); + newHop.guidingQuery = guidingQuery; + hops.add(newHop); + return newHop; + } + + public int getHopNumbers() { + return hops.size(); + } + + public Hop getHop(int hopNumber) { + return hops.get(hopNumber); + } + + public static class TermBoost { + String term; + float boost; + + public TermBoost(String term, float boost) { + super(); + this.term = term; + if (boost <= 0) { + throw new IllegalArgumentException("Boosts must be a positive non-zero number"); + } + this.boost = boost; + } + + TermBoost() { + } + + public String getTerm() { + return term; + } + + public float getBoost() { + return boost; + } + + void readFrom(StreamInput in) throws IOException { + this.term = in.readString(); + this.boost = in.readFloat(); + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(term); + out.writeFloat(boost); + } + + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.startObject("controls"); + { + if (sampleSize != SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE) { + builder.field("sample_size", sampleSize); + } + if (sampleDiversityField != null) { + builder.startObject("sample_diversity"); + builder.field("field", sampleDiversityField); + builder.field("max_docs_per_value", maxDocsPerDiversityValue); + builder.endObject(); + } + builder.field("use_significance", useSignificance); + if (returnDetailedInfo) { + builder.field("return_detailed_stats", returnDetailedInfo); + } + } + builder.endObject(); + + for (Hop hop : hops) { + if (hop.parentHop != null) { + builder.startObject("connections"); + } + hop.toXContent(builder, params); + } + for (Hop hop : hops) { + if (hop.parentHop != null) { + builder.endObject(); + } + } + builder.endObject(); + + return builder; + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java new file mode 100644 index 0000000000000..baaaedf0163ed --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java @@ -0,0 +1,261 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.protocol.xpack.graph; + +import com.carrotsearch.hppc.ObjectIntHashMap; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; +import org.elasticsearch.protocol.xpack.graph.Connection.UnresolvedConnection; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Graph explore response holds a graph of {@link Vertex} and {@link Connection} objects + * (nodes and edges in common graph parlance). + * + * @see GraphExploreRequest + */ +public class GraphExploreResponse extends ActionResponse implements ToXContentObject { + + private long tookInMillis; + private boolean timedOut = false; + private ShardOperationFailedException[] shardFailures = ShardSearchFailure.EMPTY_ARRAY; + private Map vertices; + private Map connections; + private boolean returnDetailedInfo; + static final String RETURN_DETAILED_INFO_PARAM = "returnDetailedInfo"; + + public GraphExploreResponse() { + } + + public GraphExploreResponse(long tookInMillis, boolean timedOut, ShardOperationFailedException[] shardFailures, + Map vertices, Map connections, boolean returnDetailedInfo) { + this.tookInMillis = tookInMillis; + this.timedOut = timedOut; + this.shardFailures = shardFailures; + this.vertices = vertices; + this.connections = connections; + this.returnDetailedInfo = returnDetailedInfo; + } + + + public TimeValue getTook() { + return new TimeValue(tookInMillis); + } + + public long getTookInMillis() { + return tookInMillis; + } + + /** + * @return true if the time stated in {@link GraphExploreRequest#timeout(TimeValue)} was exceeded + * (not all hops may have been completed in this case) + */ + public boolean isTimedOut() { + return this.timedOut; + } + public ShardOperationFailedException[] getShardFailures() { + return shardFailures; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + tookInMillis = in.readVLong(); + timedOut = in.readBoolean(); + + int size = in.readVInt(); + if (size == 0) { + shardFailures = ShardSearchFailure.EMPTY_ARRAY; + } else { + shardFailures = new ShardSearchFailure[size]; + for (int i = 0; i < shardFailures.length; i++) { + shardFailures[i] = readShardSearchFailure(in); + } + } + // read vertices + size = in.readVInt(); + vertices = new HashMap<>(); + for (int i = 0; i < size; i++) { + Vertex n = Vertex.readFrom(in); + vertices.put(n.getId(), n); + } + + size = in.readVInt(); + + connections = new HashMap<>(); + for (int i = 0; i < size; i++) { + 
Connection e = new Connection(in, vertices); + connections.put(e.getId(), e); + } + + returnDetailedInfo = in.readBoolean(); + + } + + public Collection getConnections() { + return connections.values(); + } + + public Collection getVertices() { + return vertices.values(); + } + + public Vertex getVertex(VertexId id) { + return vertices.get(id); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(tookInMillis); + out.writeBoolean(timedOut); + + out.writeVInt(shardFailures.length); + for (ShardOperationFailedException shardSearchFailure : shardFailures) { + shardSearchFailure.writeTo(out); + } + + out.writeVInt(vertices.size()); + for (Vertex vertex : vertices.values()) { + vertex.writeTo(out); + } + + out.writeVInt(connections.size()); + for (Connection connection : connections.values()) { + connection.writeTo(out); + } + + out.writeBoolean(returnDetailedInfo); + + } + + private static final ParseField TOOK = new ParseField("took"); + private static final ParseField TIMED_OUT = new ParseField("timed_out"); + private static final ParseField VERTICES = new ParseField("vertices"); + private static final ParseField CONNECTIONS = new ParseField("connections"); + private static final ParseField FAILURES = new ParseField("failures"); + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(TOOK.getPreferredName(), tookInMillis); + builder.field(TIMED_OUT.getPreferredName(), timedOut); + + builder.startArray(FAILURES.getPreferredName()); + if (shardFailures != null) { + for (ShardOperationFailedException shardFailure : shardFailures) { + builder.startObject(); + shardFailure.toXContent(builder, params); + builder.endObject(); + } + } + builder.endArray(); + + ObjectIntHashMap vertexNumbers = new ObjectIntHashMap<>(vertices.size()); + + Map extraParams = new HashMap<>(); + extraParams.put(RETURN_DETAILED_INFO_PARAM, Boolean.toString(returnDetailedInfo)); + Params extendedParams = new DelegatingMapParams(extraParams, params); + + builder.startArray(VERTICES.getPreferredName()); + for (Vertex vertex : vertices.values()) { + builder.startObject(); + vertexNumbers.put(vertex, vertexNumbers.size()); + vertex.toXContent(builder, extendedParams); + builder.endObject(); + } + builder.endArray(); + + builder.startArray(CONNECTIONS.getPreferredName()); + for (Connection connection : connections.values()) { + builder.startObject(); + connection.toXContent(builder, extendedParams, vertexNumbers); + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "GraphExploreResponsenParser", true, + args -> { + GraphExploreResponse result = new GraphExploreResponse(); + result.vertices = new HashMap<>(); + result.connections = new HashMap<>(); + + result.tookInMillis = (Long) args[0]; + result.timedOut = (Boolean) args[1]; + + @SuppressWarnings("unchecked") + List vertices = (List) args[2]; + @SuppressWarnings("unchecked") + List unresolvedConnections = (List) args[3]; + @SuppressWarnings("unchecked") + List failures = (List) args[4]; + for (Vertex vertex : vertices) { + // reverse-engineer if detailed stats were requested - + // mainly here for testing framework's equality tests + result.returnDetailedInfo = result.returnDetailedInfo || vertex.getFg() > 0; + result.vertices.put(vertex.getId(), vertex); + } + for (UnresolvedConnection 
unresolvedConnection : unresolvedConnections) { + Connection resolvedConnection = unresolvedConnection.resolve(vertices); + result.connections.put(resolvedConnection.getId(), resolvedConnection); + } + if (failures.size() > 0) { + result.shardFailures = failures.toArray(new ShardSearchFailure[failures.size()]); + } + return result; + }); + + static { + PARSER.declareLong(constructorArg(), TOOK); + PARSER.declareBoolean(constructorArg(), TIMED_OUT); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> Vertex.fromXContent(p), VERTICES); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> UnresolvedConnection.fromXContent(p), CONNECTIONS); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.fromXContent(p), FAILURES); + } + + public static GraphExploreResponse fromXContext(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java new file mode 100644 index 0000000000000..70ec61067f5b8 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * A Hop represents one of potentially many stages in a graph exploration. + * Each Hop identifies one or more fields in which it will attempt to find + * terms that are significantly connected to the previous Hop. Each field is identified + * using a {@link VertexRequest} + * + *

An example series of Hops on webserver logs would be:
 + * 1. an initial Hop to find the top ten IPAddresses trying to access urls containing the word "admin"
 + * 2. a secondary Hop to see which other URLs those IPAddresses were trying to access
 + *
 + * Optionally, each hop can contain a "guiding query" that further limits the set of documents considered. In our weblog example above we might choose to constrain the second hop to only look at log records that had a response code of 404.
 + *
 + * If absent, the list of {@link VertexRequest}s is inherited from the prior Hop's list to avoid repeating the fields that will be examined at each stage. + *
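That inheritance rule is easy to see on the request API itself; a brief, hedged sketch (index, query and field names are illustrative):

    GraphExploreRequest request = new GraphExploreRequest("weblogs");
    Hop first = request.createNextHop(QueryBuilders.termQuery("url", "admin"));
    first.addVertexRequest("ip").size(10);
    Hop second = request.createNextHop(null);                    // no guiding query and no vertices declared here
    assert second.getNumberVertexRequests() == 1;                // the "ip" vertex request is inherited from the first hop
    assert "ip".equals(second.getVertexRequest(0).fieldName());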

    + * + */ +public class Hop implements ToXContentFragment{ + final Hop parentHop; + List vertices = null; + QueryBuilder guidingQuery = null; + + public Hop(Hop parent) { + this.parentHop = parent; + } + + public ActionRequestValidationException validate(ActionRequestValidationException validationException) { + + if (getEffectiveVertexRequests().size() == 0) { + validationException = ValidateActions.addValidationError(GraphExploreRequest.NO_VERTICES_ERROR_MESSAGE, validationException); + } + return validationException; + + } + + public Hop getParentHop() { + return parentHop; + } + + void writeTo(StreamOutput out) throws IOException { + out.writeOptionalNamedWriteable(guidingQuery); + if (vertices == null) { + out.writeVInt(0); + } else { + out.writeVInt(vertices.size()); + for (VertexRequest vr : vertices) { + vr.writeTo(out); + } + } + } + + void readFrom(StreamInput in) throws IOException { + guidingQuery = in.readOptionalNamedWriteable(QueryBuilder.class); + int size = in.readVInt(); + if (size > 0) { + vertices = new ArrayList<>(); + for (int i = 0; i < size; i++) { + VertexRequest vr = new VertexRequest(); + vr.readFrom(in); + vertices.add(vr); + } + } + } + + public QueryBuilder guidingQuery() { + if (guidingQuery != null) { + return guidingQuery; + } + return QueryBuilders.matchAllQuery(); + } + + /** + * Add a field in which this {@link Hop} will look for terms that are highly linked to + * previous hops and optionally the guiding query. + * + * @param fieldName a field in the chosen index + */ + public VertexRequest addVertexRequest(String fieldName) { + if (vertices == null) { + vertices = new ArrayList<>(); + } + VertexRequest vr = new VertexRequest(); + vr.fieldName(fieldName); + vertices.add(vr); + return vr; + } + + /** + * An optional parameter that focuses the exploration on documents that + * match the given query. + * + * @param queryBuilder any query + */ + public void guidingQuery(QueryBuilder queryBuilder) { + guidingQuery = queryBuilder; + } + + protected List getEffectiveVertexRequests() { + if (vertices != null) { + return vertices; + } + if (parentHop == null) { + return Collections.emptyList(); + } + // otherwise inherit settings from parent + return parentHop.getEffectiveVertexRequests(); + } + + public int getNumberVertexRequests() { + return getEffectiveVertexRequests().size(); + } + + public VertexRequest getVertexRequest(int requestNumber) { + return getEffectiveVertexRequests().get(requestNumber); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (guidingQuery != null) { + builder.field("query"); + guidingQuery.toXContent(builder, params); + } + if(vertices != null && vertices.size()>0) { + builder.startArray("vertices"); + for (VertexRequest vertexRequest : vertices) { + vertexRequest.toXContent(builder, params); + } + builder.endArray(); + } + return builder; + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java new file mode 100644 index 0000000000000..cfc26f44fac04 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java @@ -0,0 +1,268 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * A vertex in a graph response represents a single term (a field and value pair) + * which appears in one or more documents found as part of the graph exploration. + * + * A vertex term could be a bank account number, an email address, a hashtag or any + * other term that appears in documents and is interesting to represent in a network. + */ +public class Vertex implements ToXContentFragment { + + private final String field; + private final String term; + private double weight; + private final int depth; + private final long bg; + private long fg; + private static final ParseField FIELD = new ParseField("field"); + private static final ParseField TERM = new ParseField("term"); + private static final ParseField WEIGHT = new ParseField("weight"); + private static final ParseField DEPTH = new ParseField("depth"); + private static final ParseField FG = new ParseField("fg"); + private static final ParseField BG = new ParseField("bg"); + + + public Vertex(String field, String term, double weight, int depth, long bg, long fg) { + super(); + this.field = field; + this.term = term; + this.weight = weight; + this.depth = depth; + this.bg = bg; + this.fg = fg; + } + + static Vertex readFrom(StreamInput in) throws IOException { + return new Vertex(in.readString(), in.readString(), in.readDouble(), in.readVInt(), in.readVLong(), in.readVLong()); + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(field); + out.writeString(term); + out.writeDouble(weight); + out.writeVInt(depth); + out.writeVLong(bg); + out.writeVLong(fg); + } + + @Override + public int hashCode() { + return Objects.hash(field, term, weight, depth, bg, fg); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Vertex other = (Vertex) obj; + return depth == other.depth && + weight == other.weight && + bg == other.bg && + fg == other.fg && + Objects.equals(field, other.field) && + Objects.equals(term, other.term); + + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + boolean returnDetailedInfo = 
params.paramAsBoolean(GraphExploreResponse.RETURN_DETAILED_INFO_PARAM, false); + builder.field(FIELD.getPreferredName(), field); + builder.field(TERM.getPreferredName(), term); + builder.field(WEIGHT.getPreferredName(), weight); + builder.field(DEPTH.getPreferredName(), depth); + if (returnDetailedInfo) { + builder.field(FG.getPreferredName(), fg); + builder.field(BG.getPreferredName(), bg); + } + return builder; + } + + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "VertexParser", true, + args -> { + String field = (String) args[0]; + String term = (String) args[1]; + double weight = (Double) args[2]; + int depth = (Integer) args[3]; + Long optionalBg = (Long) args[4]; + Long optionalFg = (Long) args[5]; + long bg = optionalBg == null ? 0 : optionalBg; + long fg = optionalFg == null ? 0 : optionalFg; + return new Vertex(field, term, weight, depth, bg, fg); + }); + + static { + PARSER.declareString(constructorArg(), FIELD); + PARSER.declareString(constructorArg(), TERM); + PARSER.declareDouble(constructorArg(), WEIGHT); + PARSER.declareInt(constructorArg(), DEPTH); + PARSER.declareLong(optionalConstructorArg(), BG); + PARSER.declareLong(optionalConstructorArg(), FG); + } + + static Vertex fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + + + /** + * @return a {@link VertexId} object that uniquely identifies this Vertex + */ + public VertexId getId() { + return createId(field, term); + } + + /** + * A convenience method for creating a {@link VertexId} + * @param field the field + * @param term the term + * @return a {@link VertexId} that can be used for looking up vertices + */ + public static VertexId createId(String field, String term) { + return new VertexId(field,term); + } + + @Override + public String toString() { + return getId().toString(); + } + + public String getField() { + return field; + } + + public String getTerm() { + return term; + } + + /** + * The weight of a vertex is an accumulation of all of the {@link Connection}s + * that are linked to this {@link Vertex} as part of a graph exploration. + * It is used internally to identify the most interesting vertices to be returned. + * @return a measure of the {@link Vertex}'s relative importance. + */ + public double getWeight() { + return weight; + } + + public void setWeight(final double weight) { + this.weight = weight; + } + + /** + * If the {@link GraphExploreRequest#useSignificance(boolean)} is true (the default) + * this statistic is available. + * @return the number of documents in the index that contain this term (see bg_count in + * + * the significant_terms aggregation) + */ + public long getBg() { + return bg; + } + + /** + * If the {@link GraphExploreRequest#useSignificance(boolean)} is true (the default) + * this statistic is available. + * Together with {@link #getBg()} these numbers are used to derive the significance of a term. 
+ * @return the number of documents in the sample of best matching documents that contain this term (see fg_count in + * + * the significant_terms aggregation) + */ + public long getFg() { + return fg; + } + + public void setFg(final long fg) { + this.fg = fg; + } + + /** + * @return the sequence number in the series of hops where this Vertex term was first encountered + */ + public int getHopDepth() { + return depth; + } + + /** + * An identifier (implements hashcode and equals) that represents a + * unique key for a {@link Vertex} + */ + public static class VertexId { + private final String field; + private final String term; + + public VertexId(String field, String term) { + this.field = field; + this.term = term; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + VertexId vertexId = (VertexId) o; + + if (field != null ? !field.equals(vertexId.field) : vertexId.field != null) + return false; + if (term != null ? !term.equals(vertexId.term) : vertexId.term != null) + return false; + + return true; + } + + @Override + public int hashCode() { + int result = field != null ? field.hashCode() : 0; + result = 31 * result + (term != null ? term.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return field + ":" + term; + } + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java new file mode 100644 index 0000000000000..116497fe2301c --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java @@ -0,0 +1,248 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + * A request to identify terms from a choice of field as part of a {@link Hop}. + * Optionally, a set of terms can be provided that are used as an exclusion or + * inclusion list to filter which terms are considered. 
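A short, hedged illustration of the filtering behaviour described above (the terms and boosts are made up); note that include and exclude clauses are mutually exclusive:

    VertexRequest ips = new VertexRequest().fieldName("ip").size(20);
    ips.addInclude("10.0.0.1", 2.0f);   // restrict exploration to this term, with a relative boost
    ips.addInclude("10.0.0.2", 1.0f);
    // ips.addExclude("10.0.0.3");      // would throw IllegalArgumentException: include and exclude clauses cannot be mixed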
+ * + */ +public class VertexRequest implements ToXContentObject { + private String fieldName; + private int size = DEFAULT_SIZE; + public static final int DEFAULT_SIZE = 5; + private Map includes; + private Set excludes; + public static final int DEFAULT_MIN_DOC_COUNT = 3; + private int minDocCount = DEFAULT_MIN_DOC_COUNT; + public static final int DEFAULT_SHARD_MIN_DOC_COUNT = 2; + private int shardMinDocCount = DEFAULT_SHARD_MIN_DOC_COUNT; + + + public VertexRequest() { + + } + + void readFrom(StreamInput in) throws IOException { + fieldName = in.readString(); + size = in.readVInt(); + minDocCount = in.readVInt(); + shardMinDocCount = in.readVInt(); + + int numIncludes = in.readVInt(); + if (numIncludes > 0) { + includes = new HashMap<>(); + for (int i = 0; i < numIncludes; i++) { + TermBoost tb = new TermBoost(); + tb.readFrom(in); + includes.put(tb.term, tb); + } + } + + int numExcludes = in.readVInt(); + if (numExcludes > 0) { + excludes = new HashSet<>(); + for (int i = 0; i < numExcludes; i++) { + excludes.add(in.readString()); + } + } + + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(fieldName); + out.writeVInt(size); + out.writeVInt(minDocCount); + out.writeVInt(shardMinDocCount); + + if (includes != null) { + out.writeVInt(includes.size()); + for (TermBoost tb : includes.values()) { + tb.writeTo(out); + } + } else { + out.writeVInt(0); + } + + if (excludes != null) { + out.writeVInt(excludes.size()); + for (String term : excludes) { + out.writeString(term); + } + } else { + out.writeVInt(0); + } + } + + public String fieldName() { + return fieldName; + } + + public VertexRequest fieldName(String fieldName) { + this.fieldName = fieldName; + return this; + } + + public int size() { + return size; + } + + /** + * @param size The maximum number of terms that should be returned from this field as part of this {@link Hop} + */ + public VertexRequest size(int size) { + this.size = size; + return this; + } + + public boolean hasIncludeClauses() { + return includes != null && includes.size() > 0; + } + + public boolean hasExcludeClauses() { + return excludes != null && excludes.size() > 0; + } + + /** + * Adds a term that should be excluded from results + * @param term A term to be excluded + */ + public void addExclude(String term) { + if (includes != null) { + throw new IllegalArgumentException("Cannot have both include and exclude clauses"); + } + if (excludes == null) { + excludes = new HashSet<>(); + } + excludes.add(term); + } + + /** + * Adds a term to the set of allowed values - the boost defines the relative + * importance when pursuing connections in subsequent {@link Hop}s. The boost value + * appears as part of the query. 
+ * @param term a required term + * @param boost an optional boost + */ + public void addInclude(String term, float boost) { + if (excludes != null) { + throw new IllegalArgumentException("Cannot have both include and exclude clauses"); + } + if (includes == null) { + includes = new HashMap<>(); + } + includes.put(term, new TermBoost(term, boost)); + } + + public TermBoost[] includeValues() { + return includes.values().toArray(new TermBoost[includes.size()]); + } + + public String[] includeValuesAsStringArray() { + String[] result = new String[includes.size()]; + int i = 0; + for (TermBoost tb : includes.values()) { + result[i++] = tb.term; + } + return result; + } + + public String[] excludesAsArray() { + return excludes.toArray(new String[excludes.size()]); + } + + public int minDocCount() { + return minDocCount; + } + + /** + * A "certainty" threshold which defines the weight-of-evidence required before + * a term found in this field is identified as a useful connection + * + * @param value The minimum number of documents that contain this term found in the samples used across all shards + */ + public VertexRequest minDocCount(int value) { + minDocCount = value; + return this; + } + + + public int shardMinDocCount() { + return Math.min(shardMinDocCount, minDocCount); + } + + /** + * A "certainty" threshold which defines the weight-of-evidence required before + * a term found in this field is identified as a useful connection + * + * @param value The minimum number of documents that contain this term found in the samples used across all shards + */ + public VertexRequest shardMinDocCount(int value) { + shardMinDocCount = value; + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("field", fieldName); + if (size != DEFAULT_SIZE) { + builder.field("size", size); + } + if (minDocCount != DEFAULT_MIN_DOC_COUNT) { + builder.field("min_doc_count", minDocCount); + } + if (shardMinDocCount != DEFAULT_SHARD_MIN_DOC_COUNT) { + builder.field("shard_min_doc_count", shardMinDocCount); + } + if(includes!=null) { + builder.startArray("include"); + for (TermBoost tb : includes.values()) { + builder.startObject(); + builder.field("term", tb.term); + builder.field("boost", tb.boost); + builder.endObject(); + } + builder.endArray(); + } + if(excludes!=null) { + builder.startArray("exclude"); + for (String value : excludes) { + builder.value(value); + } + builder.endArray(); + } + builder.endObject(); + return builder; + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java similarity index 86% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java rename to x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java index 216990d9f0ec0..f4f666074a118 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java @@ -18,7 +18,7 @@ */ /** - * Request and Response objects for the default distribution's Security + * Request and Response objects for the default distribution's Graph * APIs. 
*/ -package org.elasticsearch.protocol.xpack.security; +package org.elasticsearch.protocol.xpack.graph; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java new file mode 100644 index 0000000000000..9c4a76fdcecf1 --- /dev/null +++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java @@ -0,0 +1,136 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Predicate; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class GraphExploreResponseTests extends AbstractXContentTestCase< GraphExploreResponse> { + + @Override + protected GraphExploreResponse createTestInstance() { + return createInstance(0); + } + private static GraphExploreResponse createInstance(int numFailures) { + int numItems = randomIntBetween(4, 128); + boolean timedOut = randomBoolean(); + boolean showDetails = randomBoolean(); + long overallTookInMillis = randomNonNegativeLong(); + Map vertices = new HashMap<>(); + Map connections = new HashMap<>(); + ShardOperationFailedException [] failures = new ShardOperationFailedException [numFailures]; + for (int i = 0; i < failures.length; i++) { + failures[i] = new ShardSearchFailure(new ElasticsearchException("an error")); + } + + //Create random set of vertices + for (int i = 0; i < numItems; i++) { + Vertex v = new Vertex("field1", randomAlphaOfLength(5), randomDouble(), 0, + showDetails?randomIntBetween(100, 200):0, + showDetails?randomIntBetween(1, 100):0); + vertices.put(v.getId(), v); + } + + //Wire up half the vertices randomly + Vertex[] vs = vertices.values().toArray(new Vertex[vertices.size()]); + for (int i = 0; i < numItems/2; i++) { + Vertex v1 = vs[randomIntBetween(0, vs.length-1)]; + Vertex v2 = vs[randomIntBetween(0, vs.length-1)]; + if(v1 != v2) { + Connection conn = new Connection(v1, v2, randomDouble(), randomLongBetween(1, 10)); + connections.put(conn.getId(), conn); + } + } + return new GraphExploreResponse(overallTookInMillis, timedOut, failures, vertices, connections, showDetails); + } + + + private static GraphExploreResponse createTestInstanceWithFailures() { + return 
createInstance(randomIntBetween(1, 128)); + } + + @Override + protected GraphExploreResponse doParseInstance(XContentParser parser) throws IOException { + return GraphExploreResponse.fromXContext(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected String[] getShuffleFieldsExceptions() { + return new String[]{"vertices", "connections"}; + } + + protected Predicate<String> getRandomFieldsExcludeFilterWhenResultHasErrors() { + return field -> field.startsWith("responses"); + } + + @Override + protected void assertEqualInstances(GraphExploreResponse expectedInstance, GraphExploreResponse newInstance) { + assertThat(newInstance.getTook(), equalTo(expectedInstance.getTook())); + assertThat(newInstance.isTimedOut(), equalTo(expectedInstance.isTimedOut())); + + Connection[] newConns = newInstance.getConnections().toArray(new Connection[0]); + Connection[] expectedConns = expectedInstance.getConnections().toArray(new Connection[0]); + assertArrayEquals(expectedConns, newConns); + + Vertex[] newVertices = newInstance.getVertices().toArray(new Vertex[0]); + Vertex[] expectedVertices = expectedInstance.getVertices().toArray(new Vertex[0]); + assertArrayEquals(expectedVertices, newVertices); + + ShardOperationFailedException[] newFailures = newInstance.getShardFailures(); + ShardOperationFailedException[] expectedFailures = expectedInstance.getShardFailures(); + assertEquals(expectedFailures.length, newFailures.length); + + } + + /** + * Test parsing {@link GraphExploreResponse} with inner failures as they don't support asserting on xcontent equivalence, given + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier<GraphExploreResponse> instanceSupplier = GraphExploreResponseTests::createTestInstanceWithFailures; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break.
+ boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent( + NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, getShuffleFieldsExceptions(), + getRandomFieldsExcludeFilterWhenResultHasErrors(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, ToXContent.EMPTY_PARAMS); + } + +} diff --git a/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java b/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java index c0111e57c7448..d1ee4f2d9e104 100644 --- a/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java +++ b/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.security.audit; import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.apache.http.message.BasicHeader; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -111,10 +110,12 @@ public NamedWriteableRegistry getNamedWriteableRegistry() { } public void testIndexAuditTrailWorking() throws Exception { - Response response = getRestClient().performRequest("GET", "/", - new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, - UsernamePasswordToken.basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())))); - assertThat(response.getStatusLine().getStatusCode(), is(200)); + Request request = new Request("GET", "/"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray()))); + request.setOptions(options); + Response response = getRestClient().performRequest(request); final AtomicReference lastClusterState = new AtomicReference<>(); final boolean found = awaitSecurityAuditIndex(lastClusterState, QueryBuilders.matchQuery("principal", USER)); diff --git a/x-pack/qa/evil-tests/build.gradle b/x-pack/qa/evil-tests/build.gradle new file mode 100644 index 0000000000000..03f2a56987310 --- /dev/null +++ b/x-pack/qa/evil-tests/build.gradle @@ -0,0 +1,9 @@ +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" +} + +test { + systemProperty 'tests.security.manager', 'false' +} diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/core/scheduler/EvilSchedulerEngineTests.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/core/scheduler/EvilSchedulerEngineTests.java new file mode 100644 index 0000000000000..2dfd314ffb06e --- /dev/null +++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/core/scheduler/EvilSchedulerEngineTests.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.scheduler; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.time.Clock; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verifyNoMoreInteractions; + +public class EvilSchedulerEngineTests extends ESTestCase { + + public void testOutOfMemoryErrorWhileTriggeredIsRethrownAndIsUncaught() throws InterruptedException { + final AtomicReference maybeFatal = new AtomicReference<>(); + final CountDownLatch uncaughtLatuch = new CountDownLatch(1); + final Thread.UncaughtExceptionHandler uncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler(); + try { + /* + * We want to test that the out of memory error thrown from the scheduler engine goes uncaught on another thread; this gives us + * confidence that an error thrown during a triggered event will lead to the node being torn down. + */ + final AtomicReference maybeThread = new AtomicReference<>(); + Thread.setDefaultUncaughtExceptionHandler((t, e) -> { + maybeFatal.set(e); + maybeThread.set(Thread.currentThread()); + uncaughtLatuch.countDown(); + }); + final Logger mockLogger = mock(Logger.class); + final SchedulerEngine engine = new SchedulerEngine(Settings.EMPTY, Clock.systemUTC(), mockLogger); + try { + final AtomicBoolean trigger = new AtomicBoolean(); + engine.register(event -> { + if (trigger.compareAndSet(false, true)) { + throw new OutOfMemoryError("640K ought to be enough for anybody"); + } else { + fail("listener invoked twice"); + } + }); + final CountDownLatch schedulerLatch = new CountDownLatch(1); + engine.add(new SchedulerEngine.Job( + getTestName(), + (startTime, now) -> { + if (schedulerLatch.getCount() == 1) { + schedulerLatch.countDown(); + return 0; + } else { + throw new AssertionError("nextScheduledTimeAfter invoked more than the expected number of times"); + } + })); + + uncaughtLatuch.await(); + assertTrue(trigger.get()); + assertNotNull(maybeFatal.get()); + assertThat(maybeFatal.get(), instanceOf(OutOfMemoryError.class)); + assertThat(maybeFatal.get(), hasToString(containsString("640K ought to be enough for anybody"))); + assertNotNull(maybeThread.get()); + assertThat(maybeThread.get(), not(equalTo(Thread.currentThread()))); // the error should be rethrown on another thread + schedulerLatch.await(); + verifyNoMoreInteractions(mockLogger); // we never logged anything + } finally { + engine.stop(); + } + } finally { + // restore the uncaught exception handler + Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler); + } + } + +} diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index 3cf2970120675..ab8f9172b690c 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -11,7 +11,8 @@ apply plugin: 'elasticsearch.build' test.enabled = false dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here 
+ testCompile project(path: xpackModule('core'), configuration: 'default') testCompile (project(path: xpackModule('security'), configuration: 'runtime')) { // Need to drop the guava dependency here or we get a conflict with watcher's guava dependency. // This is total #$%, but the solution is to get the SAML realm (which uses guava) out of security proper @@ -249,7 +250,8 @@ subprojects { check.dependsOn(integTest) dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('watcher'), configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 24303b8342b7e..7c4eda37d2fb0 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -392,6 +392,8 @@ public void testRollupIDSchemeAfterRestart() throws Exception { indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-02T00:00:01\",\"value\":345}"); client().performRequest(indexRequest); + assertRollUpJob("rollup-id-test"); + // stop the rollup job to force a state save, which will upgrade the ID final Request stopRollupJobRequest = new Request("POST", "_xpack/rollup/job/rollup-id-test/_stop"); Map stopRollupJobResponse = entityAsMap(client().performRequest(stopRollupJobRequest)); diff --git a/x-pack/qa/full-cluster-restart/with-system-key/build.gradle b/x-pack/qa/full-cluster-restart/with-system-key/build.gradle index 928280b6584bd..e69de29bb2d1d 100644 --- a/x-pack/qa/full-cluster-restart/with-system-key/build.gradle +++ b/x-pack/qa/full-cluster-restart/with-system-key/build.gradle @@ -1,8 +0,0 @@ -import org.elasticsearch.gradle.test.RestIntegTestTask - -// Skip test on FIPS FIXME https://github.com/elastic/elasticsearch/issues/32737 -if (project.inFipsJvm) { - tasks.withType(RestIntegTestTask) { - enabled = false - } -} diff --git a/x-pack/qa/kerberos-tests/build.gradle b/x-pack/qa/kerberos-tests/build.gradle index 59667d9ee7809..f680a45bd7f57 100644 --- a/x-pack/qa/kerberos-tests/build.gradle +++ b/x-pack/qa/kerberos-tests/build.gradle @@ -7,7 +7,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') } diff --git a/x-pack/qa/ml-basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java b/x-pack/qa/ml-basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java deleted file mode 100644 index e7381050260c4..0000000000000 --- a/x-pack/qa/ml-basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java +++ /dev/null @@ -1,328 
+0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.integration; - -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.ml.MachineLearning; - -import java.io.IOException; -import java.net.URLEncoder; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.common.xcontent.XContentType.JSON; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -public class MlBasicMultiNodeIT extends ESRestTestCase { - - @SuppressWarnings("unchecked") - public void testMachineLearningInstalled() throws Exception { - Response response = client().performRequest("get", "/_xpack"); - assertEquals(200, response.getStatusLine().getStatusCode()); - Map features = (Map) responseEntityToMap(response).get("features"); - Map ml = (Map) features.get("ml"); - assertNotNull(ml); - assertTrue((Boolean) ml.get("available")); - assertTrue((Boolean) ml.get("enabled")); - } - - public void testInvalidJob() throws Exception { - // The job name is invalid because it contains a space - String jobId = "invalid job"; - ResponseException e = expectThrows(ResponseException.class, () -> createFarequoteJob(jobId)); - assertTrue(e.getMessage(), e.getMessage().contains("can contain lowercase alphanumeric (a-z and 0-9), hyphens or underscores")); - // If validation of the invalid job is not done until after transportation to the master node then the - // root cause gets reported as a remote_transport_exception. The code in PubJobAction is supposed to - // validate before transportation to avoid this. This test must be done in a multi-node cluster to have - // a chance of catching a problem, hence it is here rather than in the single node integration tests. 
- assertFalse(e.getMessage(), e.getMessage().contains("remote_transport_exception")); - } - - public void testMiniFarequote() throws Exception { - String jobId = "mini-farequote-job"; - createFarequoteJob(jobId); - - Response response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); - - String postData = - "{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + - "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}"; - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data", - Collections.emptyMap(), - new StringEntity(postData, randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); - assertEquals(202, response.getStatusLine().getStatusCode()); - Map responseBody = responseEntityToMap(response); - assertEquals(2, responseBody.get("processed_record_count")); - assertEquals(4, responseBody.get("processed_field_count")); - assertEquals(177, responseBody.get("input_bytes")); - assertEquals(6, responseBody.get("input_field_count")); - assertEquals(0, responseBody.get("invalid_date_count")); - assertEquals(0, responseBody.get("missing_field_count")); - assertEquals(0, responseBody.get("out_of_order_timestamp_count")); - assertEquals(0, responseBody.get("bucket_count")); - assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); - assertEquals(1403481700000L, responseBody.get("latest_record_timestamp")); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertFlushResponse(response, true, 1403481600000L); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); - - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertEquals(200, response.getStatusLine().getStatusCode()); - @SuppressWarnings("unchecked") - Map dataCountsDoc = (Map) - ((Map)((List) responseEntityToMap(response).get("jobs")).get(0)).get("data_counts"); - assertEquals(2, dataCountsDoc.get("processed_record_count")); - assertEquals(4, dataCountsDoc.get("processed_field_count")); - assertEquals(177, dataCountsDoc.get("input_bytes")); - assertEquals(6, dataCountsDoc.get("input_field_count")); - assertEquals(0, dataCountsDoc.get("invalid_date_count")); - assertEquals(0, dataCountsDoc.get("missing_field_count")); - assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count")); - assertEquals(0, dataCountsDoc.get("bucket_count")); - assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp")); - assertEquals(1403481700000L, dataCountsDoc.get("latest_record_timestamp")); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertEquals(200, response.getStatusLine().getStatusCode()); - } - - public void testMiniFarequoteWithDatafeeder() throws Exception { - String mappings = "{" - + " \"mappings\": 
{" - + " \"response\": {" - + " \"properties\": {" - + " \"time\": { \"type\":\"date\"}," - + " \"airline\": { \"type\":\"keyword\"}," - + " \"responsetime\": { \"type\":\"float\"}" - + " }" - + " }" - + " }" - + "}"; - client().performRequest("put", "airline-data", Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data/response/1", Collections.emptyMap(), - new StringEntity("{\"time\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data/response/2", Collections.emptyMap(), - new StringEntity("{\"time\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}", - ContentType.APPLICATION_JSON)); - - // Ensure all data is searchable - client().performRequest("post", "_refresh"); - - String jobId = "mini-farequote-with-data-feeder-job"; - createFarequoteJob(jobId); - String datafeedId = "bar"; - createDatafeed(datafeedId, jobId); - - Response response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start", - Collections.singletonMap("start", "0")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("started", true), responseEntityToMap(response)); - - assertBusy(() -> { - try { - Response statsResponse = - client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertEquals(200, statsResponse.getStatusLine().getStatusCode()); - @SuppressWarnings("unchecked") - Map dataCountsDoc = (Map) - ((Map)((List) responseEntityToMap(statsResponse).get("jobs")).get(0)).get("data_counts"); - assertEquals(2, dataCountsDoc.get("input_record_count")); - assertEquals(2, dataCountsDoc.get("processed_record_count")); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stop"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("stopped", true), responseEntityToMap(response)); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); - assertEquals(200, response.getStatusLine().getStatusCode()); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertEquals(200, response.getStatusLine().getStatusCode()); - } - - public void testMiniFarequoteReopen() throws Exception { - String jobId = "mini-farequote-reopen"; - createFarequoteJob(jobId); - - Response response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); - - String postData = - 
"{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + - "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}\n" + - "{\"airline\":\"JBU\",\"responsetime\":\"877.5927\",\"sourcetype\":\"farequote\",\"time\":\"1403481800\"}\n" + - "{\"airline\":\"KLM\",\"responsetime\":\"1355.4812\",\"sourcetype\":\"farequote\",\"time\":\"1403481900\"}\n" + - "{\"airline\":\"NKS\",\"responsetime\":\"9991.3981\",\"sourcetype\":\"farequote\",\"time\":\"1403482000\"}"; - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data", - Collections.emptyMap(), - new StringEntity(postData, randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); - assertEquals(202, response.getStatusLine().getStatusCode()); - Map responseBody = responseEntityToMap(response); - assertEquals(5, responseBody.get("processed_record_count")); - assertEquals(10, responseBody.get("processed_field_count")); - assertEquals(446, responseBody.get("input_bytes")); - assertEquals(15, responseBody.get("input_field_count")); - assertEquals(0, responseBody.get("invalid_date_count")); - assertEquals(0, responseBody.get("missing_field_count")); - assertEquals(0, responseBody.get("out_of_order_timestamp_count")); - assertEquals(0, responseBody.get("bucket_count")); - assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); - assertEquals(1403482000000L, responseBody.get("latest_record_timestamp")); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertFlushResponse(response, true, 1403481600000L); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); - - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertEquals(200, response.getStatusLine().getStatusCode()); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); - - // feed some more data points - postData = - "{\"airline\":\"AAL\",\"responsetime\":\"136.2361\",\"sourcetype\":\"farequote\",\"time\":\"1407081600\"}\n" + - "{\"airline\":\"VRD\",\"responsetime\":\"282.9847\",\"sourcetype\":\"farequote\",\"time\":\"1407081700\"}\n" + - "{\"airline\":\"JAL\",\"responsetime\":\"493.0338\",\"sourcetype\":\"farequote\",\"time\":\"1407081800\"}\n" + - "{\"airline\":\"UAL\",\"responsetime\":\"8.4275\",\"sourcetype\":\"farequote\",\"time\":\"1407081900\"}\n" + - "{\"airline\":\"FFT\",\"responsetime\":\"221.8693\",\"sourcetype\":\"farequote\",\"time\":\"1407082000\"}"; - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data", - Collections.emptyMap(), - new StringEntity(postData, randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); - assertEquals(202, response.getStatusLine().getStatusCode()); - 
responseBody = responseEntityToMap(response); - assertEquals(5, responseBody.get("processed_record_count")); - assertEquals(10, responseBody.get("processed_field_count")); - assertEquals(442, responseBody.get("input_bytes")); - assertEquals(15, responseBody.get("input_field_count")); - assertEquals(0, responseBody.get("invalid_date_count")); - assertEquals(0, responseBody.get("missing_field_count")); - assertEquals(0, responseBody.get("out_of_order_timestamp_count")); - assertEquals(1000, responseBody.get("bucket_count")); - - // unintuitive: should return the earliest record timestamp of this feed??? - assertEquals(null, responseBody.get("earliest_record_timestamp")); - assertEquals(1407082000000L, responseBody.get("latest_record_timestamp")); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); - - // counts should be summed up - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertEquals(200, response.getStatusLine().getStatusCode()); - - @SuppressWarnings("unchecked") - Map dataCountsDoc = (Map) - ((Map)((List) responseEntityToMap(response).get("jobs")).get(0)).get("data_counts"); - assertEquals(10, dataCountsDoc.get("processed_record_count")); - assertEquals(20, dataCountsDoc.get("processed_field_count")); - assertEquals(888, dataCountsDoc.get("input_bytes")); - assertEquals(30, dataCountsDoc.get("input_field_count")); - assertEquals(0, dataCountsDoc.get("invalid_date_count")); - assertEquals(0, dataCountsDoc.get("missing_field_count")); - assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count")); - assertEquals(1000, dataCountsDoc.get("bucket_count")); - assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp")); - assertEquals(1407082000000L, dataCountsDoc.get("latest_record_timestamp")); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertEquals(200, response.getStatusLine().getStatusCode()); - } - - private Response createDatafeed(String datafeedId, String jobId) throws Exception { - XContentBuilder xContentBuilder = jsonBuilder(); - xContentBuilder.startObject(); - xContentBuilder.field("job_id", jobId); - xContentBuilder.array("indexes", "airline-data"); - xContentBuilder.array("types", "response"); - xContentBuilder.field("_source", true); - xContentBuilder.endObject(); - return client().performRequest("put", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId, - Collections.emptyMap(), new StringEntity(Strings.toString(xContentBuilder), ContentType.APPLICATION_JSON)); - } - - private Response createFarequoteJob(String jobId) throws Exception { - XContentBuilder xContentBuilder = jsonBuilder(); - xContentBuilder.startObject(); - xContentBuilder.field("job_id", jobId); - xContentBuilder.field("description", "Analysis of response time by airline"); - - xContentBuilder.startObject("analysis_config"); - xContentBuilder.field("bucket_span", "3600s"); - xContentBuilder.startArray("detectors"); - xContentBuilder.startObject(); - xContentBuilder.field("function", "metric"); - xContentBuilder.field("field_name", "responsetime"); - xContentBuilder.field("by_field_name", "airline"); - xContentBuilder.endObject(); - xContentBuilder.endArray(); - xContentBuilder.endObject(); 
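        // for reference, the complete request body this builder produces is roughly:
        //   {"job_id": "<jobId>", "description": "Analysis of response time by airline",
        //    "analysis_config": {"bucket_span": "3600s",
        //      "detectors": [{"function": "metric", "field_name": "responsetime", "by_field_name": "airline"}]},
        //    "data_description": {"format": "xcontent", "time_field": "time", "time_format": "epoch"}}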
- - xContentBuilder.startObject("data_description"); - xContentBuilder.field("format", "xcontent"); - xContentBuilder.field("time_field", "time"); - xContentBuilder.field("time_format", "epoch"); - xContentBuilder.endObject(); - xContentBuilder.endObject(); - - return client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + URLEncoder.encode(jobId, "UTF-8"), - Collections.emptyMap(), new StringEntity(Strings.toString(xContentBuilder), ContentType.APPLICATION_JSON)); - } - - private static Map responseEntityToMap(Response response) throws IOException { - return XContentHelper.convertToMap(JSON.xContent(), response.getEntity().getContent(), false); - } - - private static void assertFlushResponse(Response response, boolean expectedFlushed, long expectedLastFinalizedBucketEnd) - throws IOException { - Map asMap = responseEntityToMap(response); - assertThat(asMap.size(), equalTo(2)); - assertThat(asMap.get("flushed"), is(true)); - assertThat(asMap.get("last_finalized_bucket_end"), equalTo(expectedLastFinalizedBucketEnd)); - } -} diff --git a/x-pack/qa/ml-disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java b/x-pack/qa/ml-disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java deleted file mode 100644 index 3bb9566e5bf17..0000000000000 --- a/x-pack/qa/ml-disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.integration; - -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.ml.MachineLearning; - -import java.util.Collections; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.containsString; - -public class MlPluginDisabledIT extends ESRestTestCase { - - /** - * Check that when the ml plugin is disabled, you cannot create a job as the - * rest handler is not registered - */ - public void testActionsFail() throws Exception { - XContentBuilder xContentBuilder = jsonBuilder(); - xContentBuilder.startObject(); - xContentBuilder.field("actions-fail-job", "foo"); - xContentBuilder.field("description", "Analysis of response time by airline"); - - xContentBuilder.startObject("analysis_config"); - xContentBuilder.field("bucket_span", "3600s"); - xContentBuilder.startArray("detectors"); - xContentBuilder.startObject(); - xContentBuilder.field("function", "metric"); - xContentBuilder.field("field_name", "responsetime"); - xContentBuilder.field("by_field_name", "airline"); - xContentBuilder.endObject(); - xContentBuilder.endArray(); - xContentBuilder.endObject(); - - xContentBuilder.startObject("data_description"); - xContentBuilder.field("format", "xcontent"); - xContentBuilder.field("time_field", "time"); - xContentBuilder.field("time_format", "epoch"); - xContentBuilder.endObject(); - xContentBuilder.endObject(); - - ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("put", - 
MachineLearning.BASE_PATH + "anomaly_detectors/foo", Collections.emptyMap(), - new StringEntity(Strings.toString(xContentBuilder), ContentType.APPLICATION_JSON))); - assertThat(exception.getMessage(), containsString("no handler found for uri [/_xpack/ml/anomaly_detectors/foo] and method [PUT]")); - } -} diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CloseJobsIT.java b/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CloseJobsIT.java new file mode 100644 index 0000000000000..95ec9728842c6 --- /dev/null +++ b/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CloseJobsIT.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.ml.MachineLearning; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.equalTo; + +public class CloseJobsIT extends ESRestTestCase { + + private static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("x_pack_rest_user", + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); + + @Override + protected Settings restClientSettings() { + return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build(); + } + + public void testCloseJobsAcceptsOptionsFromPayload() throws Exception { + + Request request = new Request("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + "job-that-doesnot-exist*" + "/_close"); + request.setJsonEntity("{\"allow_no_jobs\":false}"); + request.setOptions(RequestOptions.DEFAULT); + ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + + request.setJsonEntity("{\"allow_no_jobs\":true}"); + Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + String responseAsString = responseEntityToString(response); + assertEquals(responseAsString, "{\"closed\":true}"); + } + + private static String responseEntityToString(Response response) throws IOException { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) { + return reader.lines().collect(Collectors.joining("\n")); + } + } +} diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java 
b/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java deleted file mode 100644 index 07529acdb8815..0000000000000 --- a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java +++ /dev/null @@ -1,701 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.integration; - -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.core.ml.integration.MlRestTestStateCleaner; -import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; -import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; -import org.junit.After; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; -import java.util.Collections; -import java.util.Locale; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; - -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; - -public class MlJobIT extends ESRestTestCase { - - private static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("x_pack_rest_user", - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); - - @Override - protected Settings restClientSettings() { - return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build(); - } - - @Override - protected boolean preserveTemplatesUponCompletion() { - return true; - } - - public void testPutJob_GivenFarequoteConfig() throws Exception { - Response response = createFarequoteJob("given-farequote-config-job"); - - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"job_id\":\"given-farequote-config-job\"")); - } - - public void testGetJob_GivenNoSuchJob() throws Exception { - ResponseException e = expectThrows(ResponseException.class, - () -> client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/non-existing-job/_stats")); - - assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); - assertThat(e.getMessage(), containsString("No known job with id 'non-existing-job'")); - } - - public void testGetJob_GivenJobExists() throws Exception { - createFarequoteJob("get-job_given-job-exists-job"); 
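        // for comparison, this removed file still uses the older positional performRequest(method, endpoint, ...)
        // overloads; with the Request-object style used elsewhere in this change (for example in CloseJobsIT above)
        // the stats call below would look roughly like:
        //   Request statsRequest = new Request("GET",
        //           MachineLearning.BASE_PATH + "anomaly_detectors/get-job_given-job-exists-job/_stats");
        //   Response statsResponse = client().performRequest(statsRequest);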
- - Response response = client().performRequest("get", - MachineLearning.BASE_PATH + "anomaly_detectors/get-job_given-job-exists-job/_stats"); - - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":1")); - assertThat(responseAsString, containsString("\"job_id\":\"get-job_given-job-exists-job\"")); - } - - public void testGetJobs_GivenSingleJob() throws Exception { - String jobId = "get-jobs_given-single-job-job"; - createFarequoteJob(jobId); - - // Explicit _all - Response response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/_all"); - - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":1")); - assertThat(responseAsString, containsString("\"job_id\":\"" + jobId + "\"")); - - // Implicit _all - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors"); - - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":1")); - assertThat(responseAsString, containsString("\"job_id\":\"" + jobId + "\"")); - } - - public void testGetJobs_GivenMultipleJobs() throws Exception { - createFarequoteJob("given-multiple-jobs-job-1"); - createFarequoteJob("given-multiple-jobs-job-2"); - createFarequoteJob("given-multiple-jobs-job-3"); - - // Explicit _all - Response response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/_all"); - - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":3")); - assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-1\"")); - assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-2\"")); - assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-3\"")); - - // Implicit _all - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors"); - - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":3")); - assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-1\"")); - assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-2\"")); - assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-3\"")); - } - - private Response createFarequoteJob(String jobId) throws IOException { - String job = "{\n" + " \"description\":\"Analysis of response time by airline\",\n" - + " \"analysis_config\" : {\n" + " \"bucket_span\": \"3600s\",\n" - + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]\n" - + " },\n" + " \"data_description\" : {\n" + " \"field_delimiter\":\",\",\n" + " " + - "\"time_field\":\"time\",\n" - + " \"time_format\":\"yyyy-MM-dd HH:mm:ssX\"\n" + " }\n" + "}"; - - return client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, - Collections.emptyMap(), new StringEntity(job, ContentType.APPLICATION_JSON)); - } - - public void testCantCreateJobWithSameID() throws 
Exception { - String jobTemplate = "{\n" + - " \"analysis_config\" : {\n" + - " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\"}]\n" + - " },\n" + - " \"data_description\": {},\n" + - " \"results_index_name\" : \"%s\"}"; - - String jobConfig = String.format(Locale.ROOT, jobTemplate, "index-1"); - - String jobId = "cant-create-job-with-same-id-job"; - Response response = client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId , - Collections.emptyMap(), - new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - final String jobConfig2 = String.format(Locale.ROOT, jobTemplate, "index-2"); - ResponseException e = expectThrows(ResponseException.class, - () ->client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, - Collections.emptyMap(), new StringEntity(jobConfig2, ContentType.APPLICATION_JSON))); - - assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); - assertThat(e.getMessage(), containsString("The job cannot be created with the Id '" + jobId + "'. The Id is already used.")); - } - - public void testCreateJobsWithIndexNameOption() throws Exception { - String jobTemplate = "{\n" + - " \"analysis_config\" : {\n" + - " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\"}]\n" + - " },\n" + - " \"data_description\": {},\n" + - " \"results_index_name\" : \"%s\"}"; - - String jobId1 = "create-jobs-with-index-name-option-job-1"; - String indexName = "non-default-index"; - String jobConfig = String.format(Locale.ROOT, jobTemplate, indexName); - - Response response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId1, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - String jobId2 = "create-jobs-with-index-name-option-job-2"; - response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId2, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - // With security enabled GET _aliases throws an index_not_found_exception - // if no aliases have been created. In multi-node tests the alias may not - // appear immediately so wait here. 
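        // (assertBusy repeatedly retries the block below until its assertions stop throwing or a timeout elapses)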
- assertBusy(() -> { - try { - Response aliasesResponse = client().performRequest("get", "_aliases"); - assertEquals(200, aliasesResponse.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(aliasesResponse); - assertThat(responseAsString, - containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName("custom-" + indexName) + "\":{\"aliases\":{")); - assertThat(responseAsString, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) - + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId1 + "\",\"boost\":1.0}}}}")); - assertThat(responseAsString, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId1) + "\":{}")); - assertThat(responseAsString, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId2) - + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId2 + "\",\"boost\":1.0}}}}")); - assertThat(responseAsString, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId2) + "\":{}")); - } catch (ResponseException e) { - throw new AssertionError(e); - } - }); - - Response indicesResponse = client().performRequest("get", "_cat/indices"); - assertEquals(200, indicesResponse.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(indicesResponse); - assertThat(responseAsString, - containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName)); - assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1)))); - assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2)))); - - String bucketResult = String.format(Locale.ROOT, - "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"result_type\":\"bucket\", \"bucket_span\": \"%s\"}", - jobId1, "1234", 1); - String id = String.format(Locale.ROOT, "%s_bucket_%s_%s", jobId1, "1234", 300); - response = client().performRequest("put", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/doc/" + id, - Collections.emptyMap(), new StringEntity(bucketResult, ContentType.APPLICATION_JSON)); - assertEquals(201, response.getStatusLine().getStatusCode()); - - bucketResult = String.format(Locale.ROOT, - "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"result_type\":\"bucket\", \"bucket_span\": \"%s\"}", - jobId1, "1236", 1); - id = String.format(Locale.ROOT, "%s_bucket_%s_%s", jobId1, "1236", 300); - response = client().performRequest("put", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/doc/" + id, - Collections.emptyMap(), new StringEntity(bucketResult, ContentType.APPLICATION_JSON)); - assertEquals(201, response.getStatusLine().getStatusCode()); - - client().performRequest("post", "_refresh"); - - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1 + "/results/buckets"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":2")); - - response = client().performRequest("get", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/_search"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"total\":2")); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - - // check that indices still 
exist, but are empty and aliases are gone - response = client().performRequest("get", "_aliases"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1)))); - assertThat(responseAsString, containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2))); //job2 still exists - - response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName)); - - client().performRequest("post", "_refresh"); - - response = client().performRequest("get", AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName + "/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":0")); - } - - public void testCreateJobInSharedIndexUpdatesMapping() throws Exception { - String jobTemplate = "{\n" + - " \"analysis_config\" : {\n" + - " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + - " },\n" + - " \"data_description\": {}\n" + - "}"; - - String jobId1 = "create-job-in-shared-index-updates-mapping-job-1"; - String byFieldName1 = "responsetime"; - String jobId2 = "create-job-in-shared-index-updates-mapping-job-2"; - String byFieldName2 = "cpu-usage"; - String jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName1); - - Response response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId1, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - // Check the index mapping contains the first by_field_name - response = client().performRequest("get", AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX - + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT + "/_mapping?pretty"); - assertEquals(200, response.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(byFieldName1)); - assertThat(responseAsString, not(containsString(byFieldName2))); - - jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName2); - response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId2, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - // Check the index mapping now contains both fields - response = client().performRequest("get", AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX - + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT + "/_mapping?pretty"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(byFieldName1)); - assertThat(responseAsString, containsString(byFieldName2)); - } - - public void testCreateJobInCustomSharedIndexUpdatesMapping() throws Exception { - String jobTemplate = "{\n" + - " \"analysis_config\" : {\n" + - " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + - " },\n" + - " \"data_description\": {},\n" + - 
" \"results_index_name\" : \"shared-index\"}"; - - String jobId1 = "create-job-in-custom-shared-index-updates-mapping-job-1"; - String byFieldName1 = "responsetime"; - String jobId2 = "create-job-in-custom-shared-index-updates-mapping-job-2"; - String byFieldName2 = "cpu-usage"; - String jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName1); - - Response response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId1, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - // Check the index mapping contains the first by_field_name - response = client().performRequest("get", - AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-shared-index" + "/_mapping?pretty"); - assertEquals(200, response.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(byFieldName1)); - assertThat(responseAsString, not(containsString(byFieldName2))); - - jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName2); - response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId2, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - // Check the index mapping now contains both fields - response = client().performRequest("get", - AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-shared-index" + "/_mapping?pretty"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(byFieldName1)); - assertThat(responseAsString, containsString(byFieldName2)); - } - - public void testCreateJob_WithClashingFieldMappingsFails() throws Exception { - String jobTemplate = "{\n" + - " \"analysis_config\" : {\n" + - " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + - " },\n" + - " \"data_description\": {}\n" + - "}"; - - String jobId1 = "job-with-response-field"; - String byFieldName1; - String jobId2 = "job-will-fail-with-mapping-error-on-response-field"; - String byFieldName2; - // we should get the friendly advice nomatter which way around the clashing fields are seen - if (randomBoolean()) { - byFieldName1 = "response"; - byFieldName2 = "response.time"; - } else { - byFieldName1 = "response.time"; - byFieldName2 = "response"; - } - String jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName1); - - Response response = client().performRequest("put", MachineLearning.BASE_PATH - + "anomaly_detectors/" + jobId1, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - final String failingJobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName2); - ResponseException e = expectThrows(ResponseException.class, - () -> client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2, - Collections.emptyMap(), new StringEntity(failingJobConfig, ContentType.APPLICATION_JSON))); - - assertThat(e.getMessage(), - containsString("This job would cause a mapping clash with existing field [response] - " + - "avoid the clash by assigning a dedicated results index")); - } - - public void testDeleteJob() throws Exception { - String jobId = "delete-job-job"; - String indexName = 
AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; - createFarequoteJob(jobId); - - Response response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(indexName)); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - - // check that the index still exists (it's shared by default) - response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(indexName)); - - assertBusy(() -> { - try { - Response r = client().performRequest("get", indexName + "/_count"); - assertEquals(200, r.getStatusLine().getStatusCode()); - String responseString = responseEntityToString(r); - assertThat(responseString, containsString("\"count\":0")); - } catch (Exception e) { - fail(e.getMessage()); - } - - }); - - // check that the job itself is gone - expectThrows(ResponseException.class, () -> - client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); - } - - public void testDeleteJobAfterMissingIndex() throws Exception { - String jobId = "delete-job-after-missing-index-job"; - String aliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); - String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; - createFarequoteJob(jobId); - - Response response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(indexName)); - - // Manually delete the index so that we can test that deletion proceeds - // normally anyway - response = client().performRequest("delete", indexName); - assertEquals(200, response.getStatusLine().getStatusCode()); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - - // check index was deleted - response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, not(containsString(aliasName))); - assertThat(responseAsString, not(containsString(indexName))); - - expectThrows(ResponseException.class, () -> - client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); - } - - public void testDeleteJobAfterMissingAliases() throws Exception { - String jobId = "delete-job-after-missing-alias-job"; - String readAliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); - String writeAliasName = AnomalyDetectorsIndex.resultsWriteAlias(jobId); - String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; - createFarequoteJob(jobId); - - // With security enabled cat aliases throws an index_not_found_exception - // if no aliases have been created. In multi-node tests the alias may not - // appear immediately so wait here. 
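// (Assuming the standard ESTestCase#assertBusy semantics: the block below is retried whenever it
//  throws an AssertionError - which is why the ResponseException is re-wrapped as one - so the
//  _cat/aliases check keeps polling until both the read and write aliases for the job are visible.)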
- assertBusy(() -> { - try { - Response aliasesResponse = client().performRequest(new Request("get", "_cat/aliases")); - assertEquals(200, aliasesResponse.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(aliasesResponse); - assertThat(responseAsString, containsString(readAliasName)); - assertThat(responseAsString, containsString(writeAliasName)); - } catch (ResponseException e) { - throw new AssertionError(e); - } - }); - - // Manually delete the aliases so that we can test that deletion proceeds - // normally anyway - Response response = client().performRequest("delete", indexName + "/_alias/" + readAliasName); - assertEquals(200, response.getStatusLine().getStatusCode()); - response = client().performRequest("delete", indexName + "/_alias/" + writeAliasName); - assertEquals(200, response.getStatusLine().getStatusCode()); - - // check aliases were deleted - expectThrows(ResponseException.class, () -> client().performRequest("get", indexName + "/_alias/" + readAliasName)); - expectThrows(ResponseException.class, () -> client().performRequest("get", indexName + "/_alias/" + writeAliasName)); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - } - - public void testMultiIndexDelete() throws Exception { - String jobId = "multi-index-delete-job"; - String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; - createFarequoteJob(jobId); - - Response response = client().performRequest("put", indexName + "-001"); - assertEquals(200, response.getStatusLine().getStatusCode()); - - response = client().performRequest("put", indexName + "-002"); - assertEquals(200, response.getStatusLine().getStatusCode()); - - response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - String responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(indexName)); - assertThat(responseAsString, containsString(indexName + "-001")); - assertThat(responseAsString, containsString(indexName + "-002")); - - // Add some documents to each index to make sure the DBQ clears them out - String recordResult = - String.format(Locale.ROOT, - "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"bucket_span\":%d, \"result_type\":\"record\"}", - jobId, 123, 1); - client().performRequest("put", indexName + "/doc/" + 123, - Collections.singletonMap("refresh", "true"), new StringEntity(recordResult, ContentType.APPLICATION_JSON)); - client().performRequest("put", indexName + "-001/doc/" + 123, - Collections.singletonMap("refresh", "true"), new StringEntity(recordResult, ContentType.APPLICATION_JSON)); - client().performRequest("put", indexName + "-002/doc/" + 123, - Collections.singletonMap("refresh", "true"), new StringEntity(recordResult, ContentType.APPLICATION_JSON)); - - // Also index a few through the alias for the first job - client().performRequest("put", indexName + "/doc/" + 456, - Collections.singletonMap("refresh", "true"), new StringEntity(recordResult, ContentType.APPLICATION_JSON)); - - - client().performRequest("post", "_refresh"); - - // check for the documents - response = client().performRequest("get", indexName+ "/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":2")); - 
- response = client().performRequest("get", indexName + "-001/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":1")); - - response = client().performRequest("get", indexName + "-002/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":1")); - - // Delete - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - - client().performRequest("post", "_refresh"); - - // check that the indices still exist but are empty - response = client().performRequest("get", "_cat/indices"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString(indexName)); - assertThat(responseAsString, containsString(indexName + "-001")); - assertThat(responseAsString, containsString(indexName + "-002")); - - response = client().performRequest("get", indexName + "/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":0")); - - response = client().performRequest("get", indexName + "-001/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":0")); - - response = client().performRequest("get", indexName + "-002/_count"); - assertEquals(200, response.getStatusLine().getStatusCode()); - responseAsString = responseEntityToString(response); - assertThat(responseAsString, containsString("\"count\":0")); - - - expectThrows(ResponseException.class, () -> - client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); - } - - public void testDelete_multipleRequest() throws Exception { - String jobId = "delete-job-mulitple-times"; - createFarequoteJob(jobId); - - ConcurrentMapLong responses = ConcurrentCollections.newConcurrentMapLong(); - ConcurrentMapLong responseExceptions = ConcurrentCollections.newConcurrentMapLong(); - AtomicReference ioe = new AtomicReference<>(); - AtomicInteger recreationGuard = new AtomicInteger(0); - AtomicReference recreationResponse = new AtomicReference<>(); - AtomicReference recreationException = new AtomicReference<>(); - - Runnable deleteJob = () -> { - try { - boolean forceDelete = randomBoolean(); - String url = MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId; - if (forceDelete) { - url += "?force=true"; - } - Response response = client().performRequest("delete", url); - responses.put(Thread.currentThread().getId(), response); - } catch (ResponseException re) { - responseExceptions.put(Thread.currentThread().getId(), re); - } catch (IOException e) { - ioe.set(e); - } - - // Immediately after the first deletion finishes, recreate the job. This should pick up - // race conditions where another delete request deletes part of the newly created job. 
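// (The recreationGuard counter below ensures that only the first thread to finish its delete
//  re-creates the job; if a slower concurrent delete then removes parts of that freshly created job,
//  the damage surfaces through the recorded recreationResponse / recreationException values rather
//  than going unnoticed.)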
- if (recreationGuard.getAndIncrement() == 0) { - try { - recreationResponse.set(createFarequoteJob(jobId)); - } catch (ResponseException re) { - recreationException.set(re); - } catch (IOException e) { - ioe.set(e); - } - } - }; - - // The idea is to hit the situation where one request waits for - // the other to complete. This is difficult to schedule but - // hopefully it will happen in CI - int numThreads = 5; - Thread [] threads = new Thread[numThreads]; - for (int i=0; i { - final Map params = new HashMap<>(2); - params.put("level", "shards"); - params.put("filter_path", "**.seq_no"); - final Response response = client().performRequest("GET", "/test-index/_stats", params); + final Request request = new Request("GET", "/test-index/_stats"); + request.addParameter("level", "shards"); + request.addParameter("filter_path", "**.seq_no"); + final Response response = client().performRequest(request); final ObjectPath path = ObjectPath.createFromResponse(response); // int looks funny here since global checkpoints are longs but the response parser does not know enough to treat them as long final int shard0GlobalCheckpoint = path.evaluate("indices.test-index.shards.0.0.seq_no.global_checkpoint"); diff --git a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java index 43ad4dc0a45a2..fb9c665b2bf1c 100644 --- a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java +++ b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java @@ -173,7 +173,7 @@ public void testBigRollup() throws Exception { " \"date_histo\": {\n" + " \"date_histogram\": {\n" + " \"field\": \"timestamp\",\n" + - " \"interval\": \"1h\",\n" + + " \"interval\": \"60m\",\n" + " \"format\": \"date_time\"\n" + " },\n" + " \"aggs\": {\n" + diff --git a/x-pack/qa/openldap-tests/build.gradle b/x-pack/qa/openldap-tests/build.gradle index 24cd6184afa63..bb9a979928978 100644 --- a/x-pack/qa/openldap-tests/build.gradle +++ b/x-pack/qa/openldap-tests/build.gradle @@ -5,7 +5,8 @@ apply plugin: 'elasticsearch.standalone-test' apply plugin: 'elasticsearch.vagrantsupport' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle index 097d343b27984..97c0e8e17fee7 100644 --- a/x-pack/qa/reindex-tests-with-security/build.gradle +++ b/x-pack/qa/reindex-tests-with-security/build.gradle @@ -2,7 +2,8 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: ':modules:reindex') diff --git 
a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle index 21ac4414d86b2..5774e5d78561d 100644 --- a/x-pack/qa/rolling-upgrade-basic/build.gradle +++ b/x-pack/qa/rolling-upgrade-basic/build.gradle @@ -7,7 +7,8 @@ import java.nio.charset.StandardCharsets apply plugin: 'elasticsearch.standalone-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit } diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index b983caa866937..90da6cf4e58b8 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -10,7 +10,8 @@ apply plugin: 'elasticsearch.build' test.enabled = false dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit } @@ -157,6 +158,7 @@ subprojects { } else { String systemKeyFile = version.before('6.3.0') ? 'x-pack/system_key' : 'system_key' extraConfigFile systemKeyFile, "${mainProject.projectDir}/src/test/resources/system_key" + keystoreSetting 'xpack.security.authc.token.passphrase', 'token passphrase' } setting 'xpack.watcher.encrypt_sensitive_data', 'true' } @@ -198,6 +200,9 @@ subprojects { setting 'xpack.watcher.encrypt_sensitive_data', 'true' keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" } + if (version.before('6.0.0')) { + keystoreSetting 'xpack.security.authc.token.passphrase', 'token passphrase' + } } } @@ -284,7 +289,8 @@ subprojects { check.dependsOn(integTest) dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('watcher')) } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml index cb036b9d13a26..ba0f4d5091e0f 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml @@ -1,9 +1,3 @@ ---- -setup: - - skip: - version: "all" - reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816" - --- "Test get old cluster job": - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml index 061a242a78d30..3a3334f6907e9 
100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml @@ -1,9 +1,3 @@ ---- -setup: - - skip: - version: "all" - reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816" - --- "Put job on the old cluster and post some data": diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml index 1da16e79cbe61..bb47524b41d87 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml @@ -1,8 +1,4 @@ setup: - - skip: - version: "all" - reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816" - - do: cluster.health: wait_for_status: green diff --git a/x-pack/qa/rolling-upgrade/with-system-key/build.gradle b/x-pack/qa/rolling-upgrade/with-system-key/build.gradle index 5aaa1ed1eff9b..03505e01dedd8 100644 --- a/x-pack/qa/rolling-upgrade/with-system-key/build.gradle +++ b/x-pack/qa/rolling-upgrade/with-system-key/build.gradle @@ -1,10 +1 @@ -import org.elasticsearch.gradle.test.RestIntegTestTask - -// Skip test on FIPS FIXME https://github.com/elastic/elasticsearch/issues/32737 -if (project.inFipsJvm) { - tasks.withType(RestIntegTestTask) { - enabled = false - } -} - group = "${group}.x-pack.qa.rolling-upgrade.with-system-key" diff --git a/x-pack/qa/saml-idp-tests/build.gradle b/x-pack/qa/saml-idp-tests/build.gradle index 752ec6fb3071b..11e89d93c8e6a 100644 --- a/x-pack/qa/saml-idp-tests/build.gradle +++ b/x-pack/qa/saml-idp-tests/build.gradle @@ -6,7 +6,8 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile 'com.google.jimfs:jimfs:1.1' @@ -36,17 +37,30 @@ integTestCluster { setting 'xpack.security.authc.token.enabled', 'true' setting 'xpack.security.authc.realms.file.type', 'file' setting 'xpack.security.authc.realms.file.order', '0' + // SAML realm 1 (no authorization_realms) setting 'xpack.security.authc.realms.shibboleth.type', 'saml' setting 'xpack.security.authc.realms.shibboleth.order', '1' setting 'xpack.security.authc.realms.shibboleth.idp.entity_id', 'https://test.shibboleth.elastic.local/' setting 'xpack.security.authc.realms.shibboleth.idp.metadata.path', 'idp-metadata.xml' - setting 'xpack.security.authc.realms.shibboleth.sp.entity_id', 'http://mock.http.elastic.local/' + setting 'xpack.security.authc.realms.shibboleth.sp.entity_id', 'http://mock1.http.elastic.local/' // The port in the ACS URL is fake - the test will bind the mock webserver // to a random port and then whenever it needs to connect to a URL on the // mock webserver it will replace 54321 with the real port - setting 'xpack.security.authc.realms.shibboleth.sp.acs', 
'http://localhost:54321/saml/acs' + setting 'xpack.security.authc.realms.shibboleth.sp.acs', 'http://localhost:54321/saml/acs1' setting 'xpack.security.authc.realms.shibboleth.attributes.principal', 'uid' setting 'xpack.security.authc.realms.shibboleth.attributes.name', 'urn:oid:2.5.4.3' + // SAML realm 2 (uses authorization_realms) + setting 'xpack.security.authc.realms.shibboleth_native.type', 'saml' + setting 'xpack.security.authc.realms.shibboleth_native.order', '2' + setting 'xpack.security.authc.realms.shibboleth_native.idp.entity_id', 'https://test.shibboleth.elastic.local/' + setting 'xpack.security.authc.realms.shibboleth_native.idp.metadata.path', 'idp-metadata.xml' + setting 'xpack.security.authc.realms.shibboleth_native.sp.entity_id', 'http://mock2.http.elastic.local/' + setting 'xpack.security.authc.realms.shibboleth_native.sp.acs', 'http://localhost:54321/saml/acs2' + setting 'xpack.security.authc.realms.shibboleth_native.attributes.principal', 'uid' + setting 'xpack.security.authc.realms.shibboleth_native.authorization_realms', 'native' + setting 'xpack.security.authc.realms.native.type', 'native' + setting 'xpack.security.authc.realms.native.order', '3' + setting 'xpack.ml.enabled', 'false' extraConfigFile 'idp-metadata.xml', idpFixtureProject.file("src/main/resources/provision/generated/idp-metadata.xml") diff --git a/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java b/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java index bf4ad79c59d43..b3fc7dd0c2faa 100644 --- a/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java +++ b/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java @@ -24,8 +24,6 @@ import org.apache.http.cookie.Cookie; import org.apache.http.cookie.CookieOrigin; import org.apache.http.cookie.MalformedCookieException; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import org.apache.http.impl.cookie.DefaultCookieSpec; @@ -39,9 +37,13 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cli.SuppressForbidden; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.SecureString; @@ -49,6 +51,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.mocksocket.MockHttpServer; import org.elasticsearch.test.rest.ESRestTestCase; @@ -65,7 +68,6 @@ import javax.net.ssl.SSLContext; import javax.net.ssl.TrustManager; import javax.net.ssl.X509ExtendedTrustManager; - import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; @@ -85,7 +87,6 @@ 
import java.util.regex.Matcher; import java.util.regex.Pattern; -import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentHelper.convertToMap; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.contains; @@ -103,13 +104,16 @@ public class SamlAuthenticationIT extends ESRestTestCase { private static final String SP_LOGIN_PATH = "/saml/login"; - private static final String SP_ACS_PATH = "/saml/acs"; + private static final String SP_ACS_PATH_1 = "/saml/acs1"; + private static final String SP_ACS_PATH_2 = "/saml/acs2"; private static final String SAML_RESPONSE_FIELD = "SAMLResponse"; private static final String REQUEST_ID_COOKIE = "saml-request-id"; private static final String KIBANA_PASSWORD = "K1b@na K1b@na K1b@na"; private static HttpServer httpServer; + private URI acs; + @BeforeClass public static void setupHttpServer() throws IOException { InetSocketAddress address = new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0); @@ -134,7 +138,8 @@ public static void shutdownHttpServer() { @Before public void setupHttpContext() { httpServer.createContext(SP_LOGIN_PATH, wrapFailures(this::httpLogin)); - httpServer.createContext(SP_ACS_PATH, wrapFailures(this::httpAcs)); + httpServer.createContext(SP_ACS_PATH_1, wrapFailures(this::httpAcs)); + httpServer.createContext(SP_ACS_PATH_2, wrapFailures(this::httpAcs)); } /** @@ -158,7 +163,8 @@ private HttpHandler wrapFailures(HttpHandler handler) { @After public void clearHttpContext() { httpServer.removeContext(SP_LOGIN_PATH); - httpServer.removeContext(SP_ACS_PATH); + httpServer.removeContext(SP_ACS_PATH_1); + httpServer.removeContext(SP_ACS_PATH_2); } @Override @@ -176,9 +182,9 @@ protected Settings restAdminSettings() { */ @Before public void setKibanaPassword() throws IOException { - final HttpEntity json = new StringEntity("{ \"password\" : \"" + KIBANA_PASSWORD + "\" }", ContentType.APPLICATION_JSON); - final Response response = adminClient().performRequest("PUT", "/_xpack/security/user/kibana/_password", emptyMap(), json); - assertOK(response); + Request request = new Request("PUT", "/_xpack/security/user/kibana/_password"); + request.setJsonEntity("{ \"password\" : \"" + KIBANA_PASSWORD + "\" }"); + adminClient().performRequest(request); } /** @@ -188,20 +194,33 @@ public void setKibanaPassword() throws IOException { */ @Before public void setupRoleMapping() throws IOException { - final StringEntity json = new StringEntity(Strings // top-level - .toString(XContentBuilder.builder(XContentType.JSON.xContent()) - .startObject() - .array("roles", new String[] { "kibana_user"} ) - .field("enabled", true) - .startObject("rules") + Request request = new Request("PUT", "/_xpack/security/role_mapping/thor-kibana"); + request.setJsonEntity(Strings.toString(XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .array("roles", new String[] { "kibana_user"} ) + .field("enabled", true) + .startObject("rules") .startArray("all") - .startObject().startObject("field").field("username", "thor").endObject().endObject() - .startObject().startObject("field").field("realm.name", "shibboleth").endObject().endObject() + .startObject().startObject("field").field("username", "thor").endObject().endObject() + .startObject().startObject("field").field("realm.name", "shibboleth").endObject().endObject() .endArray() // "all" - .endObject() // "rules" - .endObject()), 
ContentType.APPLICATION_JSON); + .endObject() // "rules" + .endObject())); + adminClient().performRequest(request); + } - final Response response = adminClient().performRequest("PUT", "/_xpack/security/role_mapping/thor-kibana", emptyMap(), json); + /** + * Create a native user for "thor" that is used for user-lookup (authorizing realms) + */ + @Before + public void setupNativeUser() throws IOException { + final Map body = MapBuilder.newMapBuilder() + .put("roles", Collections.singletonList("kibana_dashboard_only_user")) + .put("full_name", "Thor Son of Odin") + .put("password", randomAlphaOfLengthBetween(8, 16)) + .put("metadata", Collections.singletonMap("is_native", true)) + .map(); + final Response response = adminClient().performRequest(buildRequest("PUT", "/_xpack/security/user/thor", body)); assertOK(response); } @@ -221,7 +240,24 @@ public void setupRoleMapping() throws IOException { *
  • Uses that token to verify the user details
  • * */ - public void testLoginUser() throws Exception { + public void testLoginUserWithSamlRoleMapping() throws Exception { + // this ACS comes from the config in build.gradle + final Tuple authTokens = loginViaSaml("http://localhost:54321" + SP_ACS_PATH_1); + verifyElasticsearchAccessTokenForRoleMapping(authTokens.v1()); + final String accessToken = verifyElasticsearchRefreshToken(authTokens.v2()); + verifyElasticsearchAccessTokenForRoleMapping(accessToken); + } + + public void testLoginUserWithAuthorizingRealm() throws Exception { + // this ACS comes from the config in build.gradle + final Tuple authTokens = loginViaSaml("http://localhost:54321" + SP_ACS_PATH_2); + verifyElasticsearchAccessTokenForAuthorizingRealms(authTokens.v1()); + final String accessToken = verifyElasticsearchRefreshToken(authTokens.v2()); + verifyElasticsearchAccessTokenForAuthorizingRealms(accessToken); + } + + private Tuple loginViaSaml(String acs) throws Exception { + this.acs = new URI(acs); final BasicHttpContext context = new BasicHttpContext(); try (CloseableHttpClient client = getHttpClient()) { final URI loginUri = goToLoginPage(client, context); @@ -237,24 +273,21 @@ public void testLoginUser() throws Exception { final Object accessToken = result.get("access_token"); assertThat(accessToken, notNullValue()); assertThat(accessToken, instanceOf(String.class)); - verifyElasticsearchAccessToken((String) accessToken); final Object refreshToken = result.get("refresh_token"); assertThat(refreshToken, notNullValue()); assertThat(refreshToken, instanceOf(String.class)); - verifyElasticsearchRefreshToken((String) refreshToken); + + return new Tuple<>((String) accessToken, (String) refreshToken); } } /** * Verifies that the provided "Access Token" (see {@link org.elasticsearch.xpack.security.authc.TokenService}) - * is for the expected user with the expected name and roles. + * is for the expected user with the expected name and roles if the user was created from Role-Mapping */ - private void verifyElasticsearchAccessToken(String accessToken) throws IOException { - final BasicHeader authorization = new BasicHeader("Authorization", "Bearer " + accessToken); - final Response response = client().performRequest("GET", "/_xpack/security/_authenticate", authorization); - assertOK(response); - final Map map = parseResponseAsMap(response.getEntity()); + private void verifyElasticsearchAccessTokenForRoleMapping(String accessToken) throws IOException { + final Map map = callAuthenticateApiUsingAccessToken(accessToken); assertThat(map.get("username"), equalTo("thor")); assertThat(map.get("full_name"), equalTo("Thor Odinson")); assertSingletonList(map.get("roles"), "kibana_user"); @@ -268,16 +301,37 @@ private void verifyElasticsearchAccessToken(String accessToken) throws IOExcepti } /** - * Verifies that the provided "Refresh Token" (see {@link org.elasticsearch.xpack.security.authc.TokenService}) - * can be used to get a new valid access token and refresh token. 
+ * Verifies that the provided "Access Token" (see {@link org.elasticsearch.xpack.security.authc.TokenService}) + * is for the expected user with the expected name and roles if the user was retrieved from the native realm */ - private void verifyElasticsearchRefreshToken(String refreshToken) throws IOException { - final String body = "{ \"grant_type\":\"refresh_token\", \"refresh_token\":\"" + refreshToken + "\" }"; - final Response response = client().performRequest("POST", "/_xpack/security/oauth2/token", - emptyMap(), new StringEntity(body, ContentType.APPLICATION_JSON), kibanaAuth()); + private void verifyElasticsearchAccessTokenForAuthorizingRealms(String accessToken) throws IOException { + final Map map = callAuthenticateApiUsingAccessToken(accessToken); + assertThat(map.get("username"), equalTo("thor")); + assertThat(map.get("full_name"), equalTo("Thor Son of Odin")); + assertSingletonList(map.get("roles"), "kibana_dashboard_only_user"); + + assertThat(map.get("metadata"), instanceOf(Map.class)); + final Map metadata = (Map) map.get("metadata"); + assertThat(metadata.get("is_native"), equalTo(true)); + } + + private Map callAuthenticateApiUsingAccessToken(String accessToken) throws IOException { + Request request = new Request("GET", "/_xpack/security/_authenticate"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Authorization", "Bearer " + accessToken); + request.setOptions(options); + return entityAsMap(client().performRequest(request)); + } + + private String verifyElasticsearchRefreshToken(String refreshToken) throws IOException { + final Map body = MapBuilder.newMapBuilder() + .put("grant_type", "refresh_token") + .put("refresh_token", refreshToken) + .map(); + final Response response = client().performRequest(buildRequest("POST", "/_xpack/security/oauth2/token", body, kibanaAuth())); assertOK(response); - final Map result = parseResponseAsMap(response.getEntity()); + final Map result = entityAsMap(response); final Object newRefreshToken = result.get("refresh_token"); assertThat(newRefreshToken, notNullValue()); assertThat(newRefreshToken, instanceOf(String.class)); @@ -285,7 +339,7 @@ private void verifyElasticsearchRefreshToken(String refreshToken) throws IOExcep final Object accessToken = result.get("access_token"); assertThat(accessToken, notNullValue()); assertThat(accessToken, instanceOf(String.class)); - verifyElasticsearchAccessToken((String) accessToken); + return (String) accessToken; } /** @@ -351,7 +405,7 @@ private Tuple submitConsentForm(BasicHttpContext context, Closeable form.setEntity(new UrlEncodedFormEntity(params)); return execute(client, form, context, - response -> parseSamlSubmissionForm(response.getEntity().getContent())); + response -> parseSamlSubmissionForm(response.getEntity().getContent())); } /** @@ -361,14 +415,14 @@ private Tuple submitConsentForm(BasicHttpContext context, Closeable * @param saml The (deflated + base64 encoded) {@code SAMLResponse} parameter to post the ACS */ private Map submitSamlResponse(BasicHttpContext context, CloseableHttpClient client, URI acs, String saml) - throws IOException { + throws IOException { assertThat("SAML submission target", acs, notNullValue()); - assertThat(acs.getPath(), equalTo(SP_ACS_PATH)); + assertThat(acs, equalTo(this.acs)); assertThat("SAML submission content", saml, notNullValue()); // The ACS url provided from the SP is going to be wrong because the gradle // build doesn't know what the web server's port is, so it uses a fake one. 
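// (getUrl(...) rebuilds the target URL against the mock web server's real bound port, keeping only
//  the path of the advertised ACS; with two ACS endpoints configured, that path now has to come from
//  this.acs instead of the old single SP_ACS_PATH constant.)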
- final HttpPost form = new HttpPost(getUrl(SP_ACS_PATH)); + final HttpPost form = new HttpPost(getUrl(this.acs.getPath())); List params = new ArrayList<>(); params.add(new BasicNameValuePair(SAML_RESPONSE_FIELD, saml)); form.setEntity(new UrlEncodedFormEntity(params)); @@ -463,13 +517,14 @@ private String getUrl(String path) { * sends a redirect to that page. */ private void httpLogin(HttpExchange http) throws IOException { - final Response prepare = client().performRequest("POST", "/_xpack/security/saml/prepare", - emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON), kibanaAuth()); + final Map body = Collections.singletonMap("acs", this.acs.toString()); + Request request = buildRequest("POST", "/_xpack/security/saml/prepare", body, kibanaAuth()); + final Response prepare = client().performRequest(request); assertOK(prepare); - final Map body = parseResponseAsMap(prepare.getEntity()); - logger.info("Created SAML authentication request {}", body); - http.getResponseHeaders().add("Set-Cookie", REQUEST_ID_COOKIE + "=" + body.get("id")); - http.getResponseHeaders().add("Location", (String) body.get("redirect")); + final Map responseBody = parseResponseAsMap(prepare.getEntity()); + logger.info("Created SAML authentication request {}", responseBody); + http.getResponseHeaders().add("Set-Cookie", REQUEST_ID_COOKIE + "=" + responseBody.get("id")); + http.getResponseHeaders().add("Location", (String) responseBody.get("redirect")); http.sendResponseHeaders(302, 0); http.close(); } @@ -504,9 +559,11 @@ private Response samlAuthenticate(HttpExchange http) throws IOException { final String id = getCookie(REQUEST_ID_COOKIE, http); assertThat(id, notNullValue()); - final String body = "{ \"content\" : \"" + saml + "\", \"ids\": [\"" + id + "\"] }"; - return client().performRequest("POST", "/_xpack/security/saml/authenticate", - emptyMap(), new StringEntity(body, ContentType.APPLICATION_JSON), kibanaAuth()); + final Map body = MapBuilder.newMapBuilder() + .put("content", saml) + .put("ids", Collections.singletonList(id)) + .map(); + return client().performRequest(buildRequest("POST", "/_xpack/security/saml/authenticate", body, kibanaAuth())); } private List parseRequestForm(HttpExchange http) throws IOException { @@ -520,6 +577,7 @@ private String getCookie(String name, HttpExchange http) throws IOException { try { final String cookies = http.getRequestHeaders().getFirst("Cookie"); if (cookies == null) { + logger.warn("No cookies in: {}", http.getResponseHeaders()); return null; } Header header = new BasicHeader("Cookie", cookies); @@ -542,6 +600,20 @@ private static void assertSingletonList(Object value, String expectedElement) { assertThat(((List) value), contains(expectedElement)); } + private Request buildRequest(String method, String endpoint, Map body, Header... 
headers) throws IOException { + Request request = new Request(method, endpoint); + XContentBuilder builder = XContentFactory.jsonBuilder().map(body); + if (body != null) { + request.setJsonEntity(BytesReference.bytes(builder).utf8ToString()); + } + final RequestOptions.Builder options = request.getOptions().toBuilder(); + for (Header header : headers) { + options.addHeader(header.getName(), header.getValue()); + } + request.setOptions(options); + return request; + } + private static BasicHeader kibanaAuth() { final String auth = UsernamePasswordToken.basicAuthHeaderValue("kibana", new SecureString(KIBANA_PASSWORD.toCharArray())); return new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, auth); diff --git a/x-pack/qa/security-client-tests/build.gradle b/x-pack/qa/security-client-tests/build.gradle index 97945fb00efcd..e676e55a152d4 100644 --- a/x-pack/qa/security-client-tests/build.gradle +++ b/x-pack/qa/security-client-tests/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') } diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle index 7aeed3ad62de6..aef4fc33f6abe 100644 --- a/x-pack/qa/security-example-spi-extension/build.gradle +++ b/x-pack/qa/security-example-spi-extension/build.gradle @@ -8,7 +8,7 @@ esplugin { } dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + compileOnly "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') } diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java index af3fb160e133f..dfd4a81ea2157 100644 --- a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java @@ -12,9 +12,9 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; public class CustomRealm extends Realm { diff --git a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java index e206de6e39272..d1435ebaa3c28 100644 --- a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java +++ b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import 
org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; diff --git a/x-pack/qa/security-migrate-tests/build.gradle b/x-pack/qa/security-migrate-tests/build.gradle index 3a8a0cf100554..abc3564ca13f2 100644 --- a/x-pack/qa/security-migrate-tests/build.gradle +++ b/x-pack/qa/security-migrate-tests/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('security'), configuration: 'runtime') testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') } diff --git a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java b/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java index e810e638f6895..4ac927c6646c1 100644 --- a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java +++ b/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java @@ -25,7 +25,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; import org.elasticsearch.xpack.core.security.client.SecurityClient; -import org.elasticsearch.protocol.xpack.security.User; +import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool; import org.junit.Before; diff --git a/x-pack/qa/security-setup-password-tests/build.gradle b/x-pack/qa/security-setup-password-tests/build.gradle index adb159acf6f6b..c0801a38b570c 100644 --- a/x-pack/qa/security-setup-password-tests/build.gradle +++ b/x-pack/qa/security-setup-password-tests/build.gradle @@ -2,7 +2,8 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java b/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java index 74f1223f4a6a1..860c30c0ddd55 100644 --- a/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java +++ b/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java @@ -5,8 +5,9 @@ */ package org.elasticsearch.xpack.security.authc.esnative.tool; -import org.apache.http.message.BasicHeader; import 
org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; @@ -52,7 +53,7 @@ public void testSetupPasswordToolAutoSetup() throws Exception { final Path configPath = PathUtils.get(testConfigDir); setSystemPropsForTool(configPath); - Response nodesResponse = client().performRequest("GET", "/_nodes/http"); + Response nodesResponse = client().performRequest(new Request("GET", "/_nodes/http")); Map nodesMap = entityAsMap(nodesResponse); Map nodes = (Map) nodesMap.get("nodes"); @@ -97,15 +98,16 @@ public void testSetupPasswordToolAutoSetup() throws Exception { } }); - assertEquals(4, userPasswordMap.size()); + assertEquals(5, userPasswordMap.size()); userPasswordMap.entrySet().forEach(entry -> { final String basicHeader = "Basic " + Base64.getEncoder().encodeToString((entry.getKey() + ":" + entry.getValue()).getBytes(StandardCharsets.UTF_8)); try { - Response authenticateResponse = client().performRequest("GET", "/_xpack/security/_authenticate", - new BasicHeader("Authorization", basicHeader)); - assertEquals(200, authenticateResponse.getStatusLine().getStatusCode()); - Map userInfoMap = entityAsMap(authenticateResponse); + Request request = new Request("GET", "/_xpack/security/_authenticate"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Authorization", basicHeader); + request.setOptions(options); + Map userInfoMap = entityAsMap(client().performRequest(request)); assertEquals(entry.getKey(), userInfoMap.get("username")); } catch (IOException e) { throw new UncheckedIOException(e); diff --git a/x-pack/qa/smoke-test-graph-with-security/build.gradle b/x-pack/qa/smoke-test-graph-with-security/build.gradle index 9cdfaffccfbce..f0f819b46d478 100644 --- a/x-pack/qa/smoke-test-graph-with-security/build.gradle +++ b/x-pack/qa/smoke-test-graph-with-security/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } // bring in graph rest test suite diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle index 8ce0cde76575a..7813ff3d3d56c 100644 --- a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle +++ b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('watcher')) testCompile project(path: xpackModule('monitoring')) } diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java index d89d558f02fae..d3b9a974398da 100644 --- a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java +++ b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java @@ -5,12 
+5,10 @@ */ package org.elasticsearch.smoketest; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.lucene.util.LuceneTestCase.AwaitsFix; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.rest.ESRestTestCase; @@ -23,7 +21,6 @@ import org.junit.After; import java.io.IOException; -import java.util.Collections; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; @@ -36,25 +33,25 @@ public class MonitoringWithWatcherRestIT extends ESRestTestCase { @After public void cleanExporters() throws Exception { - String body = Strings.toString(jsonBuilder().startObject().startObject("transient") - .nullField("xpack.monitoring.exporters.*") - .endObject().endObject()); - assertOK(adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(), - new StringEntity(body, ContentType.APPLICATION_JSON))); - - assertOK(adminClient().performRequest("DELETE", ".watch*", Collections.emptyMap())); + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity(Strings.toString(jsonBuilder().startObject() + .startObject("transient") + .nullField("xpack.monitoring.exporters.*") + .endObject().endObject())); + adminClient().performRequest(request); + adminClient().performRequest(new Request("DELETE", "/.watch*")); } public void testThatLocalExporterAddsWatches() throws Exception { String watchId = createMonitoringWatch(); - String body = BytesReference.bytes(jsonBuilder().startObject().startObject("transient") - .field("xpack.monitoring.exporters.my_local_exporter.type", "local") - .field("xpack.monitoring.exporters.my_local_exporter.cluster_alerts.management.enabled", true) - .endObject().endObject()).utf8ToString(); - - adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(), - new StringEntity(body, ContentType.APPLICATION_JSON)); + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity(Strings.toString(jsonBuilder().startObject() + .startObject("transient") + .field("xpack.monitoring.exporters.my_local_exporter.type", "local") + .field("xpack.monitoring.exporters.my_local_exporter.cluster_alerts.management.enabled", true) + .endObject().endObject())); + adminClient().performRequest(request); assertTotalWatchCount(ClusterAlertsUtil.WATCH_IDS.length); @@ -65,14 +62,14 @@ public void testThatHttpExporterAddsWatches() throws Exception { String watchId = createMonitoringWatch(); String httpHost = getHttpHost(); - String body = BytesReference.bytes(jsonBuilder().startObject().startObject("transient") - .field("xpack.monitoring.exporters.my_http_exporter.type", "http") - .field("xpack.monitoring.exporters.my_http_exporter.host", httpHost) - .field("xpack.monitoring.exporters.my_http_exporter.cluster_alerts.management.enabled", true) - .endObject().endObject()).utf8ToString(); - - adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(), - new StringEntity(body, ContentType.APPLICATION_JSON)); + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity(Strings.toString(jsonBuilder().startObject() + .startObject("transient") + 
.field("xpack.monitoring.exporters.my_http_exporter.type", "http") + .field("xpack.monitoring.exporters.my_http_exporter.host", httpHost) + .field("xpack.monitoring.exporters.my_http_exporter.cluster_alerts.management.enabled", true) + .endObject().endObject())); + adminClient().performRequest(request); assertTotalWatchCount(ClusterAlertsUtil.WATCH_IDS.length); @@ -80,15 +77,15 @@ public void testThatHttpExporterAddsWatches() throws Exception { } private void assertMonitoringWatchHasBeenOverWritten(String watchId) throws Exception { - ObjectPath path = ObjectPath.createFromResponse(client().performRequest("GET", "_xpack/watcher/watch/" + watchId)); + ObjectPath path = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/_xpack/watcher/watch/" + watchId))); String interval = path.evaluate("watch.trigger.schedule.interval"); assertThat(interval, is("1m")); } private void assertTotalWatchCount(int expectedWatches) throws Exception { assertBusy(() -> { - assertOK(client().performRequest("POST", ".watches/_refresh")); - ObjectPath path = ObjectPath.createFromResponse(client().performRequest("POST", ".watches/_count")); + assertOK(client().performRequest(new Request("POST", "/.watches/_refresh"))); + ObjectPath path = ObjectPath.createFromResponse(client().performRequest(new Request("POST", "/.watches/_count"))); int count = path.evaluate("count"); assertThat(count, is(expectedWatches)); }); @@ -97,28 +94,28 @@ private void assertTotalWatchCount(int expectedWatches) throws Exception { private String createMonitoringWatch() throws Exception { String clusterUUID = getClusterUUID(); String watchId = clusterUUID + "_kibana_version_mismatch"; - String sampleWatch = WatchSourceBuilders.watchBuilder() + Request request = new Request("PUT", "/_xpack/watcher/watch/" + watchId); + request.setJsonEntity(WatchSourceBuilders.watchBuilder() .trigger(TriggerBuilders.schedule(new IntervalSchedule(new IntervalSchedule.Interval(1000, MINUTES)))) .input(simpleInput()) .addAction("logme", ActionBuilders.loggingAction("foo")) - .buildAsBytes(XContentType.JSON).utf8ToString(); - client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, Collections.emptyMap(), - new StringEntity(sampleWatch, ContentType.APPLICATION_JSON)); + .buildAsBytes(XContentType.JSON).utf8ToString()); + client().performRequest(request); return watchId; } private String getClusterUUID() throws Exception { - Response response = client().performRequest("GET", "_cluster/state/metadata", Collections.emptyMap()); + Response response = client().performRequest(new Request("GET", "/_cluster/state/metadata")); ObjectPath objectPath = ObjectPath.createFromResponse(response); String clusterUUID = objectPath.evaluate("metadata.cluster_uuid"); return clusterUUID; } public String getHttpHost() throws IOException { - ObjectPath path = ObjectPath.createFromResponse(client().performRequest("GET", "_cluster/state", Collections.emptyMap())); + ObjectPath path = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/_cluster/state"))); String masterNodeId = path.evaluate("master_node"); - ObjectPath nodesPath = ObjectPath.createFromResponse(client().performRequest("GET", "_nodes", Collections.emptyMap())); + ObjectPath nodesPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/_nodes"))); String httpHost = nodesPath.evaluate("nodes." 
+ masterNodeId + ".http.publish_address"); return httpHost; } diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle index 53533bd9b87f3..4f338d07fb531 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -15,7 +15,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } String outputDir = "${buildDir}/generated-resources/${project.name}" @@ -138,4 +138,4 @@ processTestResources { inputs.properties(expansions) MavenFilteringHack.filter(it, expansions) } -} \ No newline at end of file +} diff --git a/x-pack/qa/smoke-test-plugins/build.gradle b/x-pack/qa/smoke-test-plugins/build.gradle index b66903af18bfb..3b7661eeeb05a 100644 --- a/x-pack/qa/smoke-test-plugins/build.gradle +++ b/x-pack/qa/smoke-test-plugins/build.gradle @@ -4,7 +4,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } ext.pluginsCount = 0 diff --git a/x-pack/qa/smoke-test-security-with-mustache/build.gradle b/x-pack/qa/smoke-test-security-with-mustache/build.gradle index d921c5f5b6605..48b525ba3dae9 100644 --- a/x-pack/qa/smoke-test-security-with-mustache/build.gradle +++ b/x-pack/qa/smoke-test-security-with-mustache/build.gradle @@ -2,7 +2,8 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: ':modules:lang-mustache', configuration: 'runtime') } diff --git a/x-pack/qa/smoke-test-watcher-with-security/build.gradle b/x-pack/qa/smoke-test-watcher-with-security/build.gradle index a843641be801f..50e217b28b270 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/build.gradle +++ b/x-pack/qa/smoke-test-watcher-with-security/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } // bring in watcher rest test suite diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java index a989bb476118f..0c4afff509e94 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java @@ -7,9 +7,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import 
org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.elasticsearch.client.Response; +import org.elasticsearch.client.Request; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -49,9 +47,9 @@ public void startWatcher() throws Exception { emptyList(), emptyMap()); // create one document in this index, so we can test in the YAML tests, that the index cannot be accessed - Response resp = adminClient().performRequest("PUT", "/index_not_allowed_to_read/doc/1", Collections.emptyMap(), - new StringEntity("{\"foo\":\"bar\"}", ContentType.APPLICATION_JSON)); - assertThat(resp.getStatusLine().getStatusCode(), is(201)); + Request request = new Request("PUT", "/index_not_allowed_to_read/doc/1"); + request.setJsonEntity("{\"foo\":\"bar\"}"); + adminClient().performRequest(request); assertBusy(() -> { ClientYamlTestResponse response = @@ -129,4 +127,3 @@ protected Settings restAdminSettings() { .build(); } } - diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index 1c8204aa1ec62..17fbf0769fd47 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -5,9 +5,8 @@ */ package org.elasticsearch.smoketest; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; @@ -21,7 +20,6 @@ import org.junit.Before; import java.io.IOException; -import java.util.Collections; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; @@ -41,27 +39,28 @@ public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase { @Before public void startWatcher() throws Exception { - StringEntity entity = new StringEntity("{ \"value\" : \"15\" }", ContentType.APPLICATION_JSON); - assertOK(adminClient().performRequest("PUT", "my_test_index/doc/1", Collections.singletonMap("refresh", "true"), entity)); + Request createAllowedDoc = new Request("PUT", "/my_test_index/doc/1"); + createAllowedDoc.setJsonEntity("{ \"value\" : \"15\" }"); + createAllowedDoc.addParameter("refresh", "true"); + adminClient().performRequest(createAllowedDoc); // delete the watcher history to not clutter with entries from other test - adminClient().performRequest("DELETE", ".watcher-history-*", Collections.emptyMap()); + adminClient().performRequest(new Request("DELETE", ".watcher-history-*")); // create one document in this index, so we can test in the YAML tests, that the index cannot be accessed - Response resp = adminClient().performRequest("PUT", "/index_not_allowed_to_read/doc/1", Collections.emptyMap(), - new StringEntity("{\"foo\":\"bar\"}", ContentType.APPLICATION_JSON)); - assertThat(resp.getStatusLine().getStatusCode(), is(201)); + Request createNotAllowedDoc = new Request("PUT", "/index_not_allowed_to_read/doc/1"); + createNotAllowedDoc.setJsonEntity("{\"foo\":\"bar\"}"); + 
adminClient().performRequest(createNotAllowedDoc); assertBusy(() -> { try { - Response statsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); + Response statsResponse = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats")); ObjectPath objectPath = ObjectPath.createFromResponse(statsResponse); String state = objectPath.evaluate("stats.0.watcher_state"); switch (state) { case "stopped": - Response startResponse = adminClient().performRequest("POST", "_xpack/watcher/_start"); - assertOK(startResponse); + Response startResponse = adminClient().performRequest(new Request("POST", "/_xpack/watcher/_start")); String body = EntityUtils.toString(startResponse.getEntity()); assertThat(body, containsString("\"acknowledged\":true")); break; @@ -82,18 +81,18 @@ public void startWatcher() throws Exception { assertBusy(() -> { for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { - assertOK(adminClient().performRequest("HEAD", "_template/" + template)); + assertOK(adminClient().performRequest(new Request("HEAD", "_template/" + template))); } }); } @After public void stopWatcher() throws Exception { - assertOK(adminClient().performRequest("DELETE", "my_test_index")); + adminClient().performRequest(new Request("DELETE", "/my_test_index")); assertBusy(() -> { try { - Response statsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); + Response statsResponse = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats")); ObjectPath objectPath = ObjectPath.createFromResponse(statsResponse); String state = objectPath.evaluate("stats.0.watcher_state"); @@ -106,8 +105,7 @@ public void stopWatcher() throws Exception { case "starting": throw new AssertionError("waiting until starting state reached started state to stop"); case "started": - Response stopResponse = adminClient().performRequest("POST", "_xpack/watcher/_stop", Collections.emptyMap()); - assertOK(stopResponse); + Response stopResponse = adminClient().performRequest(new Request("POST", "/_xpack/watcher/_stop")); String body = EntityUtils.toString(stopResponse.getEntity()); assertThat(body, containsString("\"acknowledged\":true")); break; @@ -137,6 +135,7 @@ protected Settings restAdminSettings() { } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33320") public void testSearchInputHasPermissions() throws Exception { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -160,6 +159,7 @@ public void testSearchInputHasPermissions() throws Exception { assertThat(conditionMet, is(true)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29893") public void testSearchInputWithInsufficientPrivileges() throws Exception { String indexName = "index_not_allowed_to_read"; try (XContentBuilder builder = jsonBuilder()) { @@ -186,6 +186,7 @@ public void testSearchInputWithInsufficientPrivileges() throws Exception { assertThat(conditionMet, is(false)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33320") public void testSearchTransformHasPermissions() throws Exception { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -210,11 +211,12 @@ public void testSearchTransformHasPermissions() throws Exception { boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); assertThat(conditionMet, is(true)); - ObjectPath getObjectPath = ObjectPath.createFromResponse(client().performRequest("GET", "my_test_index/doc/my-id")); + ObjectPath 
getObjectPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/my_test_index/doc/my-id"))); String value = getObjectPath.evaluate("_source.hits.hits.0._source.value"); assertThat(value, is("15")); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33291") public void testSearchTransformInsufficientPermissions() throws Exception { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -238,11 +240,11 @@ public void testSearchTransformInsufficientPermissions() throws Exception { getWatchHistoryEntry(watchId); - Response response = adminClient().performRequest("GET", "my_test_index/doc/some-id", - Collections.singletonMap("ignore", "404")); + Response response = adminClient().performRequest(new Request("HEAD", "/my_test_index/doc/some-id")); assertThat(response.getStatusLine().getStatusCode(), is(404)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30777") public void testIndexActionHasPermissions() throws Exception { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -262,11 +264,12 @@ public void testIndexActionHasPermissions() throws Exception { boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); assertThat(conditionMet, is(true)); - ObjectPath getObjectPath = ObjectPath.createFromResponse(client().performRequest("GET", "my_test_index/doc/my-id")); + ObjectPath getObjectPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/my_test_index/doc/my-id"))); String spam = getObjectPath.evaluate("_source.spam"); assertThat(spam, is("eggs")); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33320") public void testIndexActionInsufficientPrivileges() throws Exception { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -286,16 +289,14 @@ public void testIndexActionInsufficientPrivileges() throws Exception { boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); assertThat(conditionMet, is(true)); - Response response = adminClient().performRequest("GET", "index_not_allowed_to_read/doc/my-id", - Collections.singletonMap("ignore", "404")); + Response response = adminClient().performRequest(new Request("HEAD", "/index_not_allowed_to_read/doc/my-id")); assertThat(response.getStatusLine().getStatusCode(), is(404)); } private void indexWatch(String watchId, XContentBuilder builder) throws Exception { - StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - - Response response = client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, Collections.emptyMap(), entity); - assertOK(response); + Request request = new Request("PUT", "/_xpack/watcher/watch/" + watchId); + request.setJsonEntity(Strings.toString(builder)); + Response response = client().performRequest(request); Map responseMap = entityAsMap(response); assertThat(responseMap, hasEntry("_id", watchId)); } @@ -307,7 +308,7 @@ private ObjectPath getWatchHistoryEntry(String watchId) throws Exception { private ObjectPath getWatchHistoryEntry(String watchId, String state) throws Exception { final AtomicReference objectPathReference = new AtomicReference<>(); assertBusy(() -> { - client().performRequest("POST", ".watcher-history-*/_refresh"); + client().performRequest(new Request("POST", "/.watcher-history-*/_refresh")); try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -323,8 +324,9 @@ private ObjectPath 
getWatchHistoryEntry(String watchId, String state) throws Exc .endObject().endArray(); builder.endObject(); - StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - Response response = client().performRequest("POST", ".watcher-history-*/_search", Collections.emptyMap(), entity); + Request searchRequest = new Request("POST", "/.watcher-history-*/_search"); + searchRequest.setJsonEntity(Strings.toString(builder)); + Response response = client().performRequest(searchRequest); ObjectPath objectPath = ObjectPath.createFromResponse(response); int totalHits = objectPath.evaluate("hits.total"); assertThat(totalHits, is(greaterThanOrEqualTo(1))); diff --git a/x-pack/qa/smoke-test-watcher/build.gradle b/x-pack/qa/smoke-test-watcher/build.gradle index dc87248df617f..5923afcacad94 100644 --- a/x-pack/qa/smoke-test-watcher/build.gradle +++ b/x-pack/qa/smoke-test-watcher/build.gradle @@ -7,7 +7,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('watcher'), configuration: 'runtime') testCompile project(path: ':modules:lang-mustache', configuration: 'runtime') testCompile project(path: ':modules:lang-painless', configuration: 'runtime') diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java index 86d97d01904fe..f7ecb6d58e522 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java @@ -5,8 +5,7 @@ */ package org.elasticsearch.smoketest; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; @@ -23,7 +22,6 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicReference; -import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -39,15 +37,15 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { @Before public void startWatcher() throws Exception { // delete the watcher history to not clutter with entries from other test - assertOK(adminClient().performRequest("DELETE", ".watcher-history-*")); + assertOK(adminClient().performRequest(new Request("DELETE", "/.watcher-history-*"))); assertBusy(() -> { - Response response = adminClient().performRequest("GET", "_xpack/watcher/stats"); + Response response = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats")); String state = ObjectPath.createFromResponse(response).evaluate("stats.0.watcher_state"); switch (state) { case "stopped": - Response startResponse = adminClient().performRequest("POST", "/_xpack/watcher/_start"); + Response startResponse = adminClient().performRequest(new Request("POST", "/_xpack/watcher/_start")); boolean isAcknowledged = 
ObjectPath.createFromResponse(startResponse).evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); break; @@ -65,7 +63,7 @@ public void startWatcher() throws Exception { assertBusy(() -> { for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { - Response templateExistsResponse = adminClient().performRequest("HEAD", "_template/" + template, emptyMap()); + Response templateExistsResponse = adminClient().performRequest(new Request("HEAD", "/_template/" + template)); assertThat(templateExistsResponse.getStatusLine().getStatusCode(), is(200)); } }); @@ -74,7 +72,7 @@ public void startWatcher() throws Exception { @After public void stopWatcher() throws Exception { assertBusy(() -> { - Response response = adminClient().performRequest("GET", "_xpack/watcher/stats", emptyMap()); + Response response = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats")); String state = ObjectPath.createFromResponse(response).evaluate("stats.0.watcher_state"); switch (state) { @@ -86,7 +84,7 @@ public void stopWatcher() throws Exception { case "starting": throw new AssertionError("waiting until starting state reached started state to stop"); case "started": - Response stopResponse = adminClient().performRequest("POST", "/_xpack/watcher/_stop", emptyMap()); + Response stopResponse = adminClient().performRequest(new Request("POST", "/_xpack/watcher/_stop")); boolean isAcknowledged = ObjectPath.createFromResponse(stopResponse).evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); break; @@ -108,16 +106,17 @@ protected Settings restAdminSettings() { return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32299") public void testMonitorClusterHealth() throws Exception { String watchId = "cluster_health_watch"; // get master publish address - Response clusterStateResponse = adminClient().performRequest("GET", "_cluster/state"); + Response clusterStateResponse = adminClient().performRequest(new Request("GET", "/_cluster/state")); ObjectPath clusterState = ObjectPath.createFromResponse(clusterStateResponse); String masterNode = clusterState.evaluate("master_node"); assertThat(masterNode, is(notNullValue())); - Response statsResponse = adminClient().performRequest("GET", "_nodes"); + Response statsResponse = adminClient().performRequest(new Request("GET", "/_nodes")); ObjectPath stats = ObjectPath.createFromResponse(statsResponse); String address = stats.evaluate("nodes." 
+ masterNode + ".http.publish_address"); assertThat(address, is(notNullValue())); @@ -163,16 +162,15 @@ public void testMonitorClusterHealth() throws Exception { } private void indexWatch(String watchId, XContentBuilder builder) throws Exception { - StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - - Response response = client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, emptyMap(), entity); - assertOK(response); + Request request = new Request("PUT", "/_xpack/watcher/watch/" + watchId); + request.setJsonEntity(Strings.toString(builder)); + Response response = client().performRequest(request); Map responseMap = entityAsMap(response); assertThat(responseMap, hasEntry("_id", watchId)); } private void deleteWatch(String watchId) throws IOException { - Response response = client().performRequest("DELETE", "_xpack/watcher/watch/" + watchId); + Response response = client().performRequest(new Request("DELETE", "/_xpack/watcher/watch/" + watchId)); assertOK(response); ObjectPath path = ObjectPath.createFromResponse(response); boolean found = path.evaluate("found"); @@ -182,7 +180,7 @@ private void deleteWatch(String watchId) throws IOException { private ObjectPath getWatchHistoryEntry(String watchId) throws Exception { final AtomicReference objectPathReference = new AtomicReference<>(); assertBusy(() -> { - client().performRequest("POST", ".watcher-history-*/_refresh"); + client().performRequest(new Request("POST", "/.watcher-history-*/_refresh")); try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -194,8 +192,9 @@ private ObjectPath getWatchHistoryEntry(String watchId) throws Exception { .endObject().endArray(); builder.endObject(); - StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - Response response = client().performRequest("POST", ".watcher-history-*/_search", emptyMap(), entity); + Request searchRequest = new Request("POST", "/.watcher-history-*/_search"); + searchRequest.setJsonEntity(Strings.toString(builder)); + Response response = client().performRequest(searchRequest); ObjectPath objectPath = ObjectPath.createFromResponse(response); int totalHits = objectPath.evaluate("hits.total"); assertThat(totalHits, is(greaterThanOrEqualTo(1))); @@ -208,7 +207,7 @@ private ObjectPath getWatchHistoryEntry(String watchId) throws Exception { } private void assertWatchCount(int expectedWatches) throws IOException { - Response watcherStatsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); + Response watcherStatsResponse = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats")); ObjectPath objectPath = ObjectPath.createFromResponse(watcherStatsResponse); int watchCount = objectPath.evaluate("stats.0.watch_count"); assertThat(watchCount, is(expectedWatches)); diff --git a/x-pack/qa/sql/build.gradle b/x-pack/qa/sql/build.gradle index 17a1d5acdc99f..baaf0451e51f2 100644 --- a/x-pack/qa/sql/build.gradle +++ b/x-pack/qa/sql/build.gradle @@ -1,4 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks import org.elasticsearch.gradle.test.RunTask description = 'Integration tests for SQL' @@ -29,8 +28,7 @@ dependenciesInfo.enabled = false // the main files are actually test files, so use the appropriate forbidden api sigs forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/es-all-signatures.txt'), - PrecommitTasks.getResource('/forbidden/es-test-signatures.txt')] + replaceSignatureFiles 
'es-all-signatures', 'es-test-signatures' } thirdPartyAudit.excludes = [ diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcResultSetIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcResultSetIT.java new file mode 100644 index 0000000000000..30756a11f62ec --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcResultSetIT.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.ResultSetTestCase; + +/* + * Integration testing class for the "no security" scenario (cluster running without the Security plugin, + * or with Security disabled). Runs all tests in the base class. + */ +public class JdbcResultSetIT extends ResultSetTestCase { +} diff --git a/x-pack/qa/sql/security/build.gradle b/x-pack/qa/sql/security/build.gradle index f02886f80a103..15f7734f9422e 100644 --- a/x-pack/qa/sql/security/build.gradle +++ b/x-pack/qa/sql/security/build.gradle @@ -1,5 +1,5 @@ dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } Project mainProject = project @@ -20,7 +20,7 @@ subprojects { } dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } integTestCluster { diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java index 05140577bcdf6..22ba2a1037d96 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java @@ -42,14 +42,15 @@ protected static void loadDatasetIntoEs(RestClient client) throws Exception { } protected static void loadEmpDatasetIntoEs(RestClient client) throws Exception { - loadEmpDatasetIntoEs(client, "test_emp"); - loadEmpDatasetIntoEs(client, "test_emp_copy"); + loadEmpDatasetIntoEs(client, "test_emp", "employees"); + loadEmpDatasetIntoEs(client, "test_emp_copy", "employees"); + loadEmpDatasetIntoEs(client, "test_emp_with_nulls", "employees_with_nulls"); makeAlias(client, "test_alias", "test_emp", "test_emp_copy"); makeAlias(client, "test_alias_emp", "test_emp", "test_emp_copy"); } public static void loadDocsDatasetIntoEs(RestClient client) throws Exception { - loadEmpDatasetIntoEs(client, "emp"); + loadEmpDatasetIntoEs(client, "emp", "employees"); loadLibDatasetIntoEs(client, "library"); makeAlias(client, "employees", "emp"); } @@ -62,7 +63,7 @@ private static void createString(String name, XContentBuilder builder) throws Ex .endObject(); } - protected static void loadEmpDatasetIntoEs(RestClient client, String index) throws Exception { + protected static void loadEmpDatasetIntoEs(RestClient client, String index, String fileName) throws Exception { Request request = new Request("PUT", "/" + index); XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); createIndex.startObject("settings"); @@ -129,15 +130,18 @@ protected static void loadEmpDatasetIntoEs(RestClient client, String index) thro
request = new Request("POST", "/" + index + "/emp/_bulk"); request.addParameter("refresh", "true"); StringBuilder bulk = new StringBuilder(); - csvToLines("employees", (titles, fields) -> { + csvToLines(fileName, (titles, fields) -> { bulk.append("{\"index\":{}}\n"); bulk.append('{'); String emp_no = fields.get(1); for (int f = 0; f < fields.size(); f++) { - if (f != 0) { - bulk.append(','); + // an empty value in the csv file is treated as 'null', thus skipping it in the bulk request + if (fields.get(f).trim().length() > 0) { + if (f != 0) { + bulk.append(','); + } + bulk.append('"').append(titles.get(f)).append("\":\"").append(fields.get(f)).append('"'); } - bulk.append('"').append(titles.get(f)).append("\":\"").append(fields.get(f)).append('"'); } // append department List> list = dep_emp.get(emp_no); diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java index 861a6dccaba57..447fc4f17e182 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java @@ -5,55 +5,1067 @@ */ package org.elasticsearch.xpack.qa.sql.jdbc; +import org.elasticsearch.client.Request; +import org.elasticsearch.common.CheckedBiFunction; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; +import org.elasticsearch.xpack.sql.jdbc.jdbcx.JdbcDataSource; +import org.elasticsearch.xpack.sql.type.DataType; + import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.sql.Blob; +import java.sql.Clob; import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.JDBCType; +import java.sql.NClob; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; import java.sql.Timestamp; +import java.sql.Types; +import java.util.Arrays; +import java.util.Calendar; import java.util.Date; +import java.util.GregorianCalendar; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.Set; +import java.util.TimeZone; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.lang.String.format; +import static java.util.Calendar.DAY_OF_MONTH; +import static java.util.Calendar.ERA; +import static java.util.Calendar.HOUR_OF_DAY; +import static java.util.Calendar.MILLISECOND; +import static java.util.Calendar.MINUTE; +import static java.util.Calendar.MONTH; +import static java.util.Calendar.SECOND; +import static java.util.Calendar.YEAR; public class ResultSetTestCase extends JdbcIntegrationTestCase { - public void testGettingTimestamp() throws Exception { - long randomMillis = randomLongBetween(0, System.currentTimeMillis()); + + static final Set fieldsNames = Stream.of("test_byte", "test_integer", "test_long", "test_short", "test_double", + "test_float", "test_keyword") + 
.collect(Collectors.toCollection(HashSet::new)); + static final Map<Tuple<String, Object>,JDBCType> dateTimeTestingFields = new HashMap<Tuple<String, Object>,JDBCType>(); + static final String SELECT_ALL_FIELDS = "SELECT test_boolean, test_byte, test_integer," + + "test_long, test_short, test_double, test_float, test_keyword, test_date FROM test"; + static final String SELECT_WILDCARD = "SELECT * FROM test"; + static { + dateTimeTestingFields.put(new Tuple<String, Object>("test_boolean", true), DataType.BOOLEAN.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_byte", 1), DataType.BYTE.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_integer", 1), DataType.INTEGER.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_long", 1L), DataType.LONG.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_short", 1), DataType.SHORT.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_double", 1d), DataType.DOUBLE.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_float", 1f), DataType.FLOAT.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_keyword", "true"), DataType.KEYWORD.jdbcType); + } + + // Byte values testing + public void testGettingValidByteWithoutCasting() throws Exception { + byte random1 = randomByte(); + byte random2 = randomValueOtherThan(random1, () -> randomByte()); + byte random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomByte()); + + createTestDataForByteValueTests(random1, random2, random3); + + doWithQuery("SELECT test_byte, test_null_byte, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.TINYINT, resultSetMetaData.getColumnType(1)); + assertEquals(Types.TINYINT, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getByte(1)); + assertEquals(random1, results.getByte("test_byte")); + assertEquals(random1, (byte) results.getObject("test_byte", Byte.class)); + assertTrue(results.getObject(1) instanceof Byte); + + assertEquals(0, results.getByte(2)); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_byte")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getByte(1)); + assertEquals(random2, results.getByte("test_byte")); + assertTrue(results.getObject(1) instanceof Byte); + assertEquals(random3, results.getByte("test_keyword")); + + assertFalse(results.next()); + }); + } + + public void testGettingValidByteWithCasting() throws Exception { + Map<String, Number> map = createTestDataForNumericValueTypes(() -> randomByte()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry<String, Number> e : map.entrySet()) { + byte actual = results.getObject(e.getKey(), Byte.class); + if (e.getValue() instanceof Double) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), results.getByte(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), actual); + } else if (e.getValue() instanceof Float) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), results.getByte(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), actual); + } else { + assertEquals("For field " + e.getKey(), e.getValue().byteValue(), results.getByte(e.getKey())); + assertEquals("For field " + e.getKey(), e.getValue().byteValue(), actual); + } + } + }); + } + + public void testGettingInvalidByte() throws Exception { + createIndex("test"); + 
updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + int intNotByte = randomIntBetween(Byte.MAX_VALUE + 1, Integer.MAX_VALUE); + long longNotByte = randomLongBetween(Byte.MAX_VALUE + 1, Long.MAX_VALUE); + short shortNotByte = (short) randomIntBetween(Byte.MAX_VALUE + 1, Short.MAX_VALUE); + double doubleNotByte = randomDoubleBetween(Byte.MAX_VALUE + 1, Double.MAX_VALUE, true); + float floatNotByte = randomFloatBetween(Byte.MAX_VALUE + 1, Float.MAX_VALUE); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + String doubleErrorMessage = (doubleNotByte > Long.MAX_VALUE || doubleNotByte < Long.MIN_VALUE) ? + Double.toString(doubleNotByte) : Long.toString(Math.round(doubleNotByte)); + + index("test", "1", builder -> { + builder.field("test_integer", intNotByte); + builder.field("test_long", longNotByte); + builder.field("test_short", shortNotByte); + builder.field("test_double", doubleNotByte); + builder.field("test_float", floatNotByte); + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getByte("test_integer")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", intNotByte), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_integer", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", intNotByte), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_short")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", shortNotByte), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_short", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", shortNotByte), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_long")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotByte)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_long", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotByte)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_double")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_double", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_float")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotByte)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_float", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotByte)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Byte", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> 
results.getObject("test_keyword", Byte.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Byte", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Byte", randomDate), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Byte.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Byte", randomDate), + sqle.getMessage()); + }); + } + + // Short values testing + public void testGettingValidShortWithoutCasting() throws Exception { + short random1 = randomShort(); + short random2 = randomValueOtherThan(random1, () -> randomShort()); + short random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomShort()); + + createTestDataForShortValueTests(random1, random2, random3); + + doWithQuery("SELECT test_short, test_null_short, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.SMALLINT, resultSetMetaData.getColumnType(1)); + assertEquals(Types.SMALLINT, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getShort(1)); + assertEquals(random1, results.getShort("test_short")); + assertEquals(random1, results.getObject("test_short")); + assertTrue(results.getObject(1) instanceof Short); + + assertEquals(0, results.getShort(2)); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_short")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getShort(1)); + assertEquals(random2, results.getShort("test_short")); + assertTrue(results.getObject(1) instanceof Short); + assertEquals(random3, results.getShort("test_keyword")); + + assertFalse(results.next()); + }); + } + + public void testGettingValidShortWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomShort()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + short actual = (short) results.getObject(e.getKey(), Short.class); + if (e.getValue() instanceof Double) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), results.getShort(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), actual); + } else if (e.getValue() instanceof Float) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), results.getShort(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), actual); + } else { + assertEquals("For field " + e.getKey(), + e.getValue().shortValue(), results.getShort(e.getKey())); + assertEquals("For field " + e.getKey(), e.getValue().shortValue(), actual); + } + } + }); + } + + public void testGettingInvalidShort() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + int intNotShort = randomIntBetween(Short.MAX_VALUE + 1, Integer.MAX_VALUE); + long longNotShort = randomLongBetween(Short.MAX_VALUE + 1, Long.MAX_VALUE); + double 
doubleNotShort = randomDoubleBetween(Short.MAX_VALUE + 1, Double.MAX_VALUE, true); + float floatNotShort = randomFloatBetween(Short.MAX_VALUE + 1, Float.MAX_VALUE); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + String doubleErrorMessage = (doubleNotShort > Long.MAX_VALUE || doubleNotShort < Long.MIN_VALUE) ? + Double.toString(doubleNotShort) : Long.toString(Math.round(doubleNotShort)); + + index("test", "1", builder -> { + builder.field("test_integer", intNotShort); + builder.field("test_long", longNotShort); + builder.field("test_double", doubleNotShort); + builder.field("test_float", floatNotShort); + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getShort("test_integer")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", intNotShort), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_integer", Short.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", intNotShort), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_long")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotShort)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_long", Short.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotShort)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_double")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_double", Short.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_float")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotShort)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_float", Short.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotShort)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Short", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Short.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Short", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Short", randomDate), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Short.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Short", randomDate), + sqle.getMessage()); + }); + } + + // Integer values testing + public void testGettingValidIntegerWithoutCasting() throws Exception { + int random1 = randomInt(); + int random2 = randomValueOtherThan(random1, () -> randomInt()); + int random3 = 
randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomInt()); + + createTestDataForIntegerValueTests(random1, random2, random3); + + doWithQuery("SELECT test_integer,test_null_integer,test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.INTEGER, resultSetMetaData.getColumnType(1)); + assertEquals(Types.INTEGER, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getInt(1)); + assertEquals(random1, results.getInt("test_integer")); + assertEquals(random1, (int) results.getObject("test_integer", Integer.class)); + assertTrue(results.getObject(1) instanceof Integer); + + assertEquals(0, results.getInt(2)); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_integer")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getInt(1)); + assertEquals(random2, results.getInt("test_integer")); + assertTrue(results.getObject(1) instanceof Integer); + assertEquals(random3, results.getInt("test_keyword")); + + assertFalse(results.next()); + }); + } + + public void testGettingValidIntegerWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomInt()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + int actual = results.getObject(e.getKey(), Integer.class); + if (e.getValue() instanceof Double) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), results.getInt(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), actual); + } else if (e.getValue() instanceof Float) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), results.getInt(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), actual); + } else { + assertEquals("For field " + e.getKey(), e.getValue().intValue(), results.getInt(e.getKey())); + assertEquals("For field " + e.getKey(), e.getValue().intValue(), actual); + } + } + }); + } + + public void testGettingInvalidInteger() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + long longNotInt = randomLongBetween(getMaxIntPlusOne(), Long.MAX_VALUE); + double doubleNotInt = randomDoubleBetween(getMaxIntPlusOne().doubleValue(), Double.MAX_VALUE, true); + float floatNotInt = randomFloatBetween(getMaxIntPlusOne().floatValue(), Float.MAX_VALUE); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + String doubleErrorMessage = (doubleNotInt > Long.MAX_VALUE || doubleNotInt < Long.MIN_VALUE) ? 
+ Double.toString(doubleNotInt) : Long.toString(Math.round(doubleNotInt)); + + index("test", "1", builder -> { + builder.field("test_long", longNotInt); + builder.field("test_double", doubleNotInt); + builder.field("test_float", floatNotInt); + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getInt("test_long")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotInt)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_long", Integer.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotInt)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getInt("test_double")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_double", Integer.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getInt("test_float")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotInt)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_float", Integer.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotInt)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getInt("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to an Integer", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Integer.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to an Integer", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getInt("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to an Integer", randomDate), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Integer.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to an Integer", randomDate), + sqle.getMessage()); + }); + } + + // Long values testing + public void testGettingValidLongWithoutCasting() throws Exception { + long random1 = randomLong(); + long random2 = randomValueOtherThan(random1, () -> randomLong()); + long random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomLong()); + + createTestDataForLongValueTests(random1, random2, random3); + + doWithQuery("SELECT test_long, test_null_long, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.BIGINT, resultSetMetaData.getColumnType(1)); + assertEquals(Types.BIGINT, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getLong(1)); + assertEquals(random1, results.getLong("test_long")); + assertEquals(random1, (long) results.getObject("test_long", Long.class)); + assertTrue(results.getObject(1) instanceof Long); + + assertEquals(0, results.getLong(2)); + 
assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_long")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getLong(1)); + assertEquals(random2, results.getLong("test_long")); + assertTrue(results.getObject(1) instanceof Long); + assertEquals(random3, results.getLong("test_keyword")); + + assertFalse(results.next()); + }); + } + + public void testGettingValidLongWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomLong()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + long actual = results.getObject(e.getKey(), Long.class); + if (e.getValue() instanceof Double || e.getValue() instanceof Float) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), results.getLong(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), actual); + } else { + assertEquals("For field " + e.getKey(), e.getValue().longValue(), results.getLong(e.getKey())); + assertEquals("For field " + e.getKey(), e.getValue().longValue(), actual); + } + } + }); + } + + public void testGettingInvalidLong() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + double doubleNotLong = randomDoubleBetween(getMaxLongPlusOne().doubleValue(), Double.MAX_VALUE, true); + float floatNotLong = randomFloatBetween(getMaxLongPlusOne().floatValue(), Float.MAX_VALUE); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + index("test", "1", builder -> { + builder.field("test_double", doubleNotLong); + builder.field("test_float", floatNotLong); + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getLong("test_double")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(doubleNotLong)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_double", Long.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(doubleNotLong)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getLong("test_float")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotLong)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_float", Long.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotLong)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getLong("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Long", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Long.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Long", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getLong("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a 
Long", randomDate), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Long.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Long", randomDate), + sqle.getMessage()); + }); + } + + // Double values testing + public void testGettingValidDoubleWithoutCasting() throws Exception { + double random1 = randomDouble(); + double random2 = randomValueOtherThan(random1, () -> randomDouble()); + double random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomDouble()); + + createTestDataForDoubleValueTests(random1, random2, random3); + + doWithQuery("SELECT test_double, test_null_double, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.DOUBLE, resultSetMetaData.getColumnType(1)); + assertEquals(Types.DOUBLE, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getDouble(1), 0.0d); + assertEquals(random1, results.getDouble("test_double"), 0.0d); + assertEquals(random1, results.getObject("test_double", Double.class), 0.0d); + assertTrue(results.getObject(1) instanceof Double); + + assertEquals(0, results.getDouble(2), 0.0d); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_double")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getDouble(1), 0.0d); + assertEquals(random2, results.getDouble("test_double"), 0.0d); + assertTrue(results.getObject(1) instanceof Double); + assertEquals(random3, results.getDouble("test_keyword"), 0.0d); + + assertFalse(results.next()); + }); + } + + public void testGettingValidDoubleWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomDouble()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + assertEquals("For field " + e.getKey(), e.getValue().doubleValue(), results.getDouble(e.getKey()), 0.0d); + assertEquals("For field " + e.getKey(), + e.getValue().doubleValue(), results.getObject(e.getKey(), Double.class), 0.0d); + } + }); + } + + public void testGettingInvalidDouble() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + index("test", "1", builder -> { + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getDouble("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Double", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Double.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Double", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getDouble("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Double", randomDate), 
+ sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Double.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Double", randomDate), + sqle.getMessage()); + }); + } + + // Float values testing + public void testGettingValidFloatWithoutCasting() throws Exception { + float random1 = randomFloat(); + float random2 = randomValueOtherThan(random1, () -> randomFloat()); + float random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomFloat()); + + createTestDataForFloatValueTests(random1, random2, random3); + + doWithQuery("SELECT test_float, test_null_float, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.REAL, resultSetMetaData.getColumnType(1)); + assertEquals(Types.REAL, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getFloat(1), 0.0f); + assertEquals(random1, results.getFloat("test_float"), 0.0f); + assertEquals(random1, results.getObject("test_float", Float.class), 0.0f); + assertTrue(results.getObject(1) instanceof Float); + + assertEquals(0, results.getFloat(2), 0.0d); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_float")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getFloat(1), 0.0d); + assertEquals(random2, results.getFloat("test_float"), 0.0d); + assertTrue(results.getObject(1) instanceof Float); + assertEquals(random3, results.getFloat("test_keyword"), 0.0d); + + assertFalse(results.next()); + }); + } + + public void testGettingValidFloatWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomFloat()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + assertEquals("For field " + e.getKey(), e.getValue().floatValue(), results.getFloat(e.getKey()), 0.0f); + assertEquals("For field " + e.getKey(), + e.getValue().floatValue(), results.getObject(e.getKey(), Float.class), 0.0f); + } + }); + } + + public void testGettingInvalidFloat() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + index("test", "1", builder -> { + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getFloat("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Float", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Float.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Float", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getFloat("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Float", randomDate), + sqle.getMessage()); + sqle = 
expectThrows(SQLException.class, () -> results.getObject("test_date", Float.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Float", randomDate), + sqle.getMessage()); + }); + } + + public void testGettingBooleanValues() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + long randomDate1 = randomLong(); + long randomDate2 = randomLong(); + + // true values + indexSimpleDocumentWithTrueValues(randomDate1); + + // false values + index("test", "2", builder -> { + builder.field("test_boolean", false); + builder.field("test_byte", 0); + builder.field("test_integer", 0); + builder.field("test_long", 0L); + builder.field("test_short", 0); + builder.field("test_double", 0d); + builder.field("test_float", 0f); + builder.field("test_keyword", "false"); + builder.field("test_date", randomDate2); + }); + + // other (non 0 = true) values + index("test", "3", builder -> { + builder.field("test_byte", randomValueOtherThan((byte) 0, () -> randomByte())); + builder.field("test_integer", randomValueOtherThan(0, () -> randomInt())); + builder.field("test_long", randomValueOtherThan(0L, () -> randomLong())); + builder.field("test_short", randomValueOtherThan((short) 0, () -> randomShort())); + builder.field("test_double", randomValueOtherThanMany(i -> i < 1.0d && i > -1.0d && i < Double.MAX_VALUE + && i > Double.MIN_VALUE, + () -> randomDouble() * randomInt())); + builder.field("test_float", randomValueOtherThanMany(i -> i < 1.0f && i > -1.0f && i < Float.MAX_VALUE && i > Float.MIN_VALUE, + () -> randomFloat() * randomInt())); + builder.field("test_keyword", "1"); + }); + + // other false values + index("test", "4", builder -> { + builder.field("test_keyword", "0"); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + assertEquals(true, results.getBoolean("test_boolean")); + for(String fld : fieldsNames) { + assertEquals("Expected: but was: for field " + fld, true, results.getBoolean(fld)); + assertEquals("Expected: but was: for field " + fld, true, results.getObject(fld, Boolean.class)); + } + SQLException sqle = expectThrows(SQLException.class, () -> results.getBoolean("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Boolean", randomDate1), + sqle.getMessage()); + + results.next(); + assertEquals(false, results.getBoolean("test_boolean")); + for(String fld : fieldsNames) { + assertEquals("Expected: but was: for field " + fld, false, results.getBoolean(fld)); + assertEquals("Expected: but was: for field " + fld, false, results.getObject(fld, Boolean.class)); + } + sqle = expectThrows(SQLException.class, () -> results.getBoolean("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Boolean", randomDate2), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Boolean.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Boolean", randomDate2), + sqle.getMessage()); + + results.next(); + for(String fld : fieldsNames.stream() + .filter((f) -> !f.equals("test_keyword")).collect(Collectors.toCollection(HashSet::new))) { + assertEquals("Expected: but was: for field " + fld, true, results.getBoolean(fld)); + 
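An aside on the coercion these boolean assertions exercise: document 1 holds 1/true/"true" in every column, document 2 holds 0/false/"false", document 3 holds arbitrary non-zero numerics plus the keyword "1", and document 4 the keyword "0". A minimal sketch of that rule, written only as my reading of the assertions (not the driver's actual converter):

    // Illustrative sketch: mirrors what the assertions in this test expect, not driver code.
    static Boolean coerceToBoolean(Object value) throws SQLException {
        if (value instanceof Boolean) {
            return (Boolean) value;
        }
        if (value instanceof Number) {
            return ((Number) value).doubleValue() != 0d;        // 0 -> false, any non-zero -> true
        }
        if (value instanceof String) {
            return "true".equals(value) || "1".equals(value);   // "false" and "0" -> false
        }
        throw new SQLException("Unable to convert value [" + value + "] to a Boolean");
    }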
assertEquals("Expected: but was: for field " + fld, true, results.getObject(fld, Boolean.class)); + } + + results.next(); + assertEquals(false, results.getBoolean("test_keyword")); + assertEquals(false, results.getObject("test_keyword", Boolean.class)); + }); + } + + public void testGettingDateWithoutCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + + String timeZoneId = randomKnownTimeZone(); + Calendar connCalendar = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId), Locale.ROOT); + + doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + connCalendar.setTimeInMillis(randomLongDate); + connCalendar.set(HOUR_OF_DAY, 0); + connCalendar.set(MINUTE, 0); + connCalendar.set(SECOND, 0); + connCalendar.set(MILLISECOND, 0); + + assertEquals(results.getDate("test_date"), new java.sql.Date(connCalendar.getTimeInMillis())); + assertEquals(results.getDate(9), new java.sql.Date(connCalendar.getTimeInMillis())); + assertEquals(results.getObject("test_date", java.sql.Date.class), + new java.sql.Date(randomLongDate - (randomLongDate % 86400000L))); + assertEquals(results.getObject(9, java.sql.Date.class), + new java.sql.Date(randomLongDate - (randomLongDate % 86400000L))); + + // bulk validation for all fields which are not of type date + validateErrorsForDateTimeTestsWithoutCalendar(results::getDate); + }); + } + + public void testGettingDateWithCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + index("test", "2", builder -> { + builder.timeField("test_date", null); + }); + + String timeZoneId = randomKnownTimeZone(); + String anotherTZId = randomValueOtherThan(timeZoneId, () -> randomKnownTimeZone()); + Calendar c = Calendar.getInstance(TimeZone.getTimeZone(anotherTZId), Locale.ROOT); + + doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + c.setTimeInMillis(randomLongDate); + c.set(HOUR_OF_DAY, 0); + c.set(MINUTE, 0); + c.set(SECOND, 0); + c.set(MILLISECOND, 0); + + assertEquals(results.getDate("test_date", c), new java.sql.Date(c.getTimeInMillis())); + assertEquals(results.getDate(9, c), new java.sql.Date(c.getTimeInMillis())); + + // bulk validation for all fields which are not of type date + validateErrorsForDateTimeTestsWithCalendar(c, results::getDate); + + results.next(); + assertNull(results.getDate("test_date")); + }); + } + + public void testGettingTimeWithoutCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + + String timeZoneId = randomKnownTimeZone(); + Calendar c = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId), Locale.ROOT); + + 
doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + c.setTimeInMillis(randomLongDate); + c.set(ERA, GregorianCalendar.AD); + c.set(YEAR, 1970); + c.set(MONTH, 0); + c.set(DAY_OF_MONTH, 1); + + assertEquals(results.getTime("test_date"), new java.sql.Time(c.getTimeInMillis())); + assertEquals(results.getTime(9), new java.sql.Time(c.getTimeInMillis())); + assertEquals(results.getObject("test_date", java.sql.Time.class), + new java.sql.Time(randomLongDate % 86400000L)); + assertEquals(results.getObject(9, java.sql.Time.class), + new java.sql.Time(randomLongDate % 86400000L)); + + validateErrorsForDateTimeTestsWithoutCalendar(results::getTime); + }); + } + + public void testGettingTimeWithCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + index("test", "2", builder -> { + builder.timeField("test_date", null); + }); + + String timeZoneId = randomKnownTimeZone(); + String anotherTZId = randomValueOtherThan(timeZoneId, () -> randomKnownTimeZone()); + Calendar c = Calendar.getInstance(TimeZone.getTimeZone(anotherTZId), Locale.ROOT); + + doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + c.setTimeInMillis(randomLongDate); + c.set(ERA, GregorianCalendar.AD); + c.set(YEAR, 1970); + c.set(MONTH, 0); + c.set(DAY_OF_MONTH, 1); + + assertEquals(results.getTime("test_date", c), new java.sql.Time(c.getTimeInMillis())); + assertEquals(results.getTime(9, c), new java.sql.Time(c.getTimeInMillis())); + + validateErrorsForDateTimeTestsWithCalendar(c, results::getTime); + + results.next(); + assertNull(results.getTime("test_date")); + }); + } + + public void testGettingTimestampWithoutCalendar() throws Exception { + createIndex("library"); + updateMapping("library", builder -> { + builder.startObject("release_date").field("type", "date").endObject(); + builder.startObject("republish_date").field("type", "date").endObject(); + }); + long randomMillis = randomLong(); index("library", "1", builder -> { builder.field("name", "Don Quixote"); builder.field("page_count", 1072); - builder.timeField("release_date", new Date(randomMillis)); + builder.field("release_date", randomMillis); builder.timeField("republish_date", null); }); index("library", "2", builder -> { builder.field("name", "1984"); builder.field("page_count", 328); - builder.timeField("release_date", new Date(-649036800000L)); - builder.timeField("republish_date", new Date(599616000000L)); + builder.field("release_date", -649036800000L); + builder.field("republish_date", 599616000000L); }); - try (Connection connection = esJdbc()) { - try (PreparedStatement statement = connection.prepareStatement("SELECT name, release_date, republish_date FROM library")) { - try (ResultSet results = statement.executeQuery()) { - ResultSetMetaData resultSetMetaData = results.getMetaData(); - - results.next(); - assertEquals(3, resultSetMetaData.getColumnCount()); - assertEquals(randomMillis, results.getTimestamp("release_date").getTime()); - assertEquals(randomMillis, results.getTimestamp(2).getTime()); - assertTrue(results.getObject(2) instanceof Timestamp); - assertEquals(randomMillis, ((Timestamp) results.getObject("release_date")).getTime()); - - 
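The time assertions are the mirror image: the calendar keeps the time-of-day fields but is pinned to the epoch date (1 January 1970 AD), while getObject(..., java.sql.Time.class) is checked against the epoch millis modulo one day, and getTimestamp is expected to hand back the stored epoch millis unchanged. A short sketch of the getTime expectation, under the same assumptions as the sketch above:

    // Expected java.sql.Time: keep the time of day, pin the date to 1970-01-01 (illustration only).
    static java.sql.Time expectedGetTime(long epochMillis, TimeZone connectionTimeZone) {
        Calendar cal = Calendar.getInstance(connectionTimeZone, Locale.ROOT);
        cal.setTimeInMillis(epochMillis);
        cal.set(Calendar.ERA, GregorianCalendar.AD);
        cal.set(Calendar.YEAR, 1970);
        cal.set(Calendar.MONTH, 0);          // January
        cal.set(Calendar.DAY_OF_MONTH, 1);
        return new java.sql.Time(cal.getTimeInMillis());
    }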
assertNull(results.getTimestamp(3)); - assertNull(results.getObject("republish_date")); - - assertTrue(results.next()); - assertEquals(599616000000L, results.getTimestamp("republish_date").getTime()); - assertEquals(-649036800000L, ((Timestamp) results.getObject(2)).getTime()); - - assertFalse(results.next()); - } - } - } + doWithQuery("SELECT name, release_date, republish_date FROM library", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(randomMillis, results.getTimestamp("release_date").getTime()); + assertEquals(randomMillis, results.getTimestamp(2).getTime()); + assertTrue(results.getObject(2) instanceof Timestamp); + assertEquals(randomMillis, ((Timestamp) results.getObject("release_date")).getTime()); + + assertNull(results.getTimestamp(3)); + assertNull(results.getObject("republish_date")); + + assertTrue(results.next()); + assertEquals(599616000000L, results.getTimestamp("republish_date").getTime()); + assertEquals(-649036800000L, ((Timestamp) results.getObject(2)).getTime()); + + assertFalse(results.next()); + }); + } + + public void testGettingTimestampWithCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + index("test", "2", builder -> { + builder.timeField("test_date", null); + }); + + String timeZoneId = randomKnownTimeZone(); + String anotherTZId = randomValueOtherThan(timeZoneId, () -> randomKnownTimeZone()); + Calendar c = Calendar.getInstance(TimeZone.getTimeZone(anotherTZId), Locale.ROOT); + + doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + c.setTimeInMillis(randomLongDate); + + assertEquals(results.getTimestamp("test_date", c), new java.sql.Timestamp(c.getTimeInMillis())); + assertEquals(results.getTimestamp(9, c), new java.sql.Timestamp(c.getTimeInMillis())); + + validateErrorsForDateTimeTestsWithCalendar(c, results::getTimestamp); + + results.next(); + assertNull(results.getTimestamp("test_date")); + }); + } + + public void testValidGetObjectCalls() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + byte b = randomByte(); + int i = randomInt(); + long l = randomLong(); + short s = (short) randomIntBetween(Short.MIN_VALUE, Short.MAX_VALUE); + double d = randomDouble(); + float f = randomFloat(); + boolean randomBool = randomBoolean(); + Long randomLongDate = randomLong(); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + + index("test", "1", builder -> { + builder.field("test_byte", b); + builder.field("test_integer", i); + builder.field("test_long", l); + builder.field("test_short", s); + builder.field("test_double", d); + builder.field("test_float", f); + builder.field("test_keyword", randomString); + builder.field("test_date", randomLongDate); + builder.field("test_boolean", randomBool); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + assertEquals(b, results.getObject("test_byte")); + 
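The getObject() assertions around this point pin down the default Java class returned for each mapped Elasticsearch type; the map below is only an editorial restatement of those checks (note that date columns surface as java.sql.Timestamp):

    // Summary sketch of the per-field classes the assertions in this test expect.
    Map<String, Class<?>> expectedClassPerField = new LinkedHashMap<>();
    expectedClassPerField.put("test_byte", Byte.class);
    expectedClassPerField.put("test_short", Short.class);
    expectedClassPerField.put("test_integer", Integer.class);
    expectedClassPerField.put("test_long", Long.class);
    expectedClassPerField.put("test_float", Float.class);
    expectedClassPerField.put("test_double", Double.class);
    expectedClassPerField.put("test_keyword", String.class);
    expectedClassPerField.put("test_boolean", Boolean.class);
    expectedClassPerField.put("test_date", Timestamp.class);

    for (Map.Entry<String, Class<?>> e : expectedClassPerField.entrySet()) {
        assertTrue("unexpected class for " + e.getKey(), e.getValue().isInstance(results.getObject(e.getKey())));
    }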
assertTrue(results.getObject("test_byte") instanceof Byte); + + assertEquals(i, results.getObject("test_integer")); + assertTrue(results.getObject("test_integer") instanceof Integer); + + assertEquals(l, results.getObject("test_long")); + assertTrue(results.getObject("test_long") instanceof Long); + + assertEquals(s, results.getObject("test_short")); + assertTrue(results.getObject("test_short") instanceof Short); + + assertEquals(d, results.getObject("test_double")); + assertTrue(results.getObject("test_double") instanceof Double); + + assertEquals(f, results.getObject("test_float")); + assertTrue(results.getObject("test_float") instanceof Float); + + assertEquals(randomString, results.getObject("test_keyword")); + assertTrue(results.getObject("test_keyword") instanceof String); + + assertEquals(new Date(randomLongDate), results.getObject("test_date")); + assertTrue(results.getObject("test_date") instanceof Timestamp); + + assertEquals(randomBool, results.getObject("test_boolean")); + assertTrue(results.getObject("test_boolean") instanceof Boolean); + }); } /* @@ -79,4 +1091,458 @@ public void testNoInfiniteRecursiveGetObjectCalls() throws SQLException, IOExcep fail("Infinite recursive call on getObject() method"); } } + + public void testUnsupportedGetMethods() throws IOException, SQLException { + index("test", "1", builder -> { + builder.field("test", "test"); + }); + Connection conn = esJdbc(); + PreparedStatement statement = conn.prepareStatement("SELECT * FROM test"); + ResultSet r = statement.executeQuery(); + + r.next(); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getAsciiStream("test"), "AsciiStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getAsciiStream(1), "AsciiStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getArray("test"), "Array not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getArray(1), "Array not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBigDecimal("test"), "BigDecimal not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBigDecimal("test"), "BigDecimal not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBinaryStream("test"), "BinaryStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBinaryStream(1), "BinaryStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBlob("test"), "Blob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBlob(1), "Blob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getCharacterStream("test"), "CharacterStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getCharacterStream(1), "CharacterStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getClob("test"), "Clob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getClob(1), "Clob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNCharacterStream("test"), "NCharacterStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNCharacterStream(1), "NCharacterStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNClob("test"), "NClob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNClob(1), "NClob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNString("test"), "NString not supported"); + 
assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNString(1), "NString not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getRef("test"), "Ref not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getRef(1), "Ref not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getRowId("test"), "RowId not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getRowId(1), "RowId not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getSQLXML("test"), "SQLXML not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getSQLXML(1), "SQLXML not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getURL("test"), "URL not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getURL(1), "URL not supported"); + } + + public void testUnsupportedUpdateMethods() throws IOException, SQLException { + index("test", "1", builder -> { + builder.field("test", "test"); + }); + Connection conn = esJdbc(); + PreparedStatement statement = conn.prepareStatement("SELECT * FROM test"); + ResultSet r = statement.executeQuery(); + + r.next(); + Blob b = null; + InputStream i = null; + Clob c = null; + NClob nc = null; + Reader rd = null; + + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBytes(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBytes("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateArray(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateArray("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream(1, null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBigDecimal(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBigDecimal("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream(1, null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob(1, b)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob(1, i)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob("", b)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob("", i)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBoolean(1, false)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBoolean("", false)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateByte(1, (byte) 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateByte("", (byte) 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream(1, null)); + assertThrowsWritesUnsupportedForUpdate(() ->
r.updateCharacterStream("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream(1, null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob(1, c)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob(1, rd)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob("", c)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob("", rd)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob(1, null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateDate(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateDate("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateDouble(1, 0d)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateDouble("", 0d)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateFloat(1, 0f)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateFloat("", 0f)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateInt(1, 0)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateInt("", 0)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateLong(1, 0L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateLong("", 0L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream(1, null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob(1, nc)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob(1, rd)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob("", nc)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob("", rd)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNString(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNString("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNull(1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNull("")); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRef(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRef("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRowId(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRowId("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateSQLXML(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateSQLXML("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateShort(1, (short) 0)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateShort("", (short) 0)); + 
assertThrowsWritesUnsupportedForUpdate(() -> r.updateString(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateString("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateTime(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateTime("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateTimestamp(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateTimestamp("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.insertRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.deleteRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.cancelRowUpdates()); + assertThrowsWritesUnsupportedForUpdate(() -> r.moveToInsertRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.refreshRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.moveToCurrentRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.rowUpdated()); + assertThrowsWritesUnsupportedForUpdate(() -> r.rowInserted()); + assertThrowsWritesUnsupportedForUpdate(() -> r.rowDeleted()); + } + + private void doWithQuery(String query, CheckedConsumer consumer) throws SQLException { + try (Connection connection = esJdbc()) { + try (PreparedStatement statement = connection.prepareStatement(query)) { + try (ResultSet results = statement.executeQuery()) { + consumer.accept(results); + } + } + } + } + + private void doWithQueryAndTimezone(String query, String tz, CheckedConsumer consumer) throws SQLException { + try (Connection connection = esJdbc(tz)) { + try (PreparedStatement statement = connection.prepareStatement(query)) { + try (ResultSet results = statement.executeQuery()) { + consumer.accept(results); + } + } + } + } + + private void createIndex(String index) throws Exception { + Request request = new Request("PUT", "/" + index); + XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); + createIndex.startObject("settings"); + { + createIndex.field("number_of_shards", 1); + createIndex.field("number_of_replicas", 1); + } + createIndex.endObject(); + createIndex.startObject("mappings"); + { + createIndex.startObject("doc"); + { + createIndex.startObject("properties"); + {} + createIndex.endObject(); + } + createIndex.endObject(); + } + createIndex.endObject().endObject(); + request.setJsonEntity(Strings.toString(createIndex)); + client().performRequest(request); + } + + private void updateMapping(String index, CheckedConsumer body) throws Exception { + Request request = new Request("PUT", "/" + index + "/_mapping/doc"); + XContentBuilder updateMapping = JsonXContent.contentBuilder().startObject(); + updateMapping.startObject("properties"); + { + body.accept(updateMapping); + } + updateMapping.endObject().endObject(); + + request.setJsonEntity(Strings.toString(updateMapping)); + client().performRequest(request); + } + + private void createTestDataForByteValueTests(byte random1, byte random2, byte random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_byte").field("type", "byte").endObject(); + builder.startObject("test_null_byte").field("type", "byte").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_byte", random1); + builder.field("test_null_byte", (Byte) null); + }); + index("test", "2", builder -> { + builder.field("test_byte", random2); + builder.field("test_keyword", random3); + }); + } + + private 
void createTestDataForShortValueTests(short random1, short random2, short random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_short").field("type", "short").endObject(); + builder.startObject("test_null_short").field("type", "short").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_short", random1); + builder.field("test_null_short", (Short) null); + }); + index("test", "2", builder -> { + builder.field("test_short", random2); + builder.field("test_keyword", random3); + }); + } + + private void createTestDataForIntegerValueTests(int random1, int random2, int random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_integer").field("type", "integer").endObject(); + builder.startObject("test_null_integer").field("type", "integer").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_integer", random1); + builder.field("test_null_integer", (Integer) null); + }); + index("test", "2", builder -> { + builder.field("test_integer", random2); + builder.field("test_keyword", random3); + }); + } + + private void createTestDataForLongValueTests(long random1, long random2, long random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_long").field("type", "long").endObject(); + builder.startObject("test_null_long").field("type", "long").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_long", random1); + builder.field("test_null_long", (Long) null); + }); + index("test", "2", builder -> { + builder.field("test_long", random2); + builder.field("test_keyword", random3); + }); + } + + private void createTestDataForDoubleValueTests(double random1, double random2, double random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_double").field("type", "double").endObject(); + builder.startObject("test_null_double").field("type", "double").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_double", random1); + builder.field("test_null_double", (Double) null); + }); + index("test", "2", builder -> { + builder.field("test_double", random2); + builder.field("test_keyword", random3); + }); + } + + private void createTestDataForFloatValueTests(float random1, float random2, float random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_float").field("type", "float").endObject(); + builder.startObject("test_null_float").field("type", "float").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_float", random1); + builder.field("test_null_float", (Double) null); + }); + index("test", "2", builder -> { + builder.field("test_float", random2); + builder.field("test_keyword", random3); + }); + } + + private void indexSimpleDocumentWithTrueValues(Long randomLongDate) throws IOException { + index("test", "1", builder -> { + builder.field("test_boolean", true); + 
builder.field("test_byte", 1); + builder.field("test_integer", 1); + builder.field("test_long", 1L); + builder.field("test_short", 1); + builder.field("test_double", 1d); + builder.field("test_float", 1f); + builder.field("test_keyword", "true"); + builder.field("test_date", randomLongDate); + }); + } + + /** + * Creates test data for all numeric get* methods. All values random and different from the other numeric fields already generated. + * It returns a map containing the field name and its randomly generated value to be later used in checking the returned values. + */ + private Map createTestDataForNumericValueTypes(Supplier randomGenerator) throws Exception, IOException { + Map map = new HashMap(); + createIndex("test"); + updateMappingForNumericValuesTests("test"); + + index("test", "1", builder -> { + // random Byte + byte test_byte = randomValueOtherThanMany(map::containsValue, randomGenerator).byteValue(); + builder.field("test_byte", test_byte); + map.put("test_byte", test_byte); + + // random Integer + int test_integer = randomValueOtherThanMany(map::containsValue, randomGenerator).intValue(); + builder.field("test_integer", test_integer); + map.put("test_integer", test_integer); + + // random Short + int test_short = randomValueOtherThanMany(map::containsValue, randomGenerator).shortValue(); + builder.field("test_short", test_short); + map.put("test_short", test_short); + + // random Long + long test_long = randomValueOtherThanMany(map::containsValue, randomGenerator).longValue(); + builder.field("test_long", test_long); + map.put("test_long", test_long); + + // random Double + double test_double = randomValueOtherThanMany(map::containsValue, randomGenerator).doubleValue(); + builder.field("test_double", test_double); + map.put("test_double", test_double); + + // random Float + float test_float = randomValueOtherThanMany(map::containsValue, randomGenerator).floatValue(); + builder.field("test_float", test_float); + map.put("test_float", test_float); + }); + return map; + } + + private void updateMappingForNumericValuesTests(String indexName) throws Exception { + updateMapping(indexName, builder -> { + for(String field : fieldsNames) { + builder.startObject(field).field("type", field.substring(5)).endObject(); + } + }); + } + + private void assertThrowsUnsupportedAndExpectErrorMessage(ThrowingRunnable runnable, String message) { + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, runnable); + assertEquals(message, sqle.getMessage()); + } + + private void assertThrowsWritesUnsupportedForUpdate(ThrowingRunnable r) { + assertThrowsUnsupportedAndExpectErrorMessage(r, "Writes not supported"); + } + + private void validateErrorsForDateTimeTestsWithoutCalendar(CheckedFunction method) { + SQLException sqle; + for(Entry,JDBCType> field : dateTimeTestingFields.entrySet()) { + sqle = expectThrows(SQLException.class, () -> method.apply(field.getKey().v1())); + assertEquals( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", + field.getKey().v2(), field.getValue()), sqle.getMessage()); + } + } + + private void validateErrorsForDateTimeTestsWithCalendar(Calendar c, CheckedBiFunction method) { + SQLException sqle; + for(Entry,JDBCType> field : dateTimeTestingFields.entrySet()) { + sqle = expectThrows(SQLException.class, () -> method.apply(field.getKey().v1(), c)); + assertEquals( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", + field.getKey().v2(), field.getValue()), sqle.getMessage()); + } + } + + private 
float randomFloatBetween(float start, float end) { + float result = 0.0f; + while (result < start || result > end || Float.isNaN(result)) { + result = start + randomFloat() * (end - start); + } + + return result; + } + + private Long getMaxIntPlusOne() { + return Long.valueOf(Integer.MAX_VALUE) + 1L; + } + + private Double getMaxLongPlusOne() { + return Double.valueOf(Long.MAX_VALUE) + 1d; + } + + private Connection esJdbc(String timeZoneId) throws SQLException { + return randomBoolean() ? useDriverManager(timeZoneId) : useDataSource(timeZoneId); + } + + private Connection useDriverManager(String timeZoneId) throws SQLException { + String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); + String address = "jdbc:es://" + elasticsearchAddress; + Properties connectionProperties = connectionProperties(); + connectionProperties.put(JdbcConfiguration.TIME_ZONE, timeZoneId); + Connection connection = DriverManager.getConnection(address, connectionProperties); + + assertNotNull("The timezone should be specified", connectionProperties.getProperty(JdbcConfiguration.TIME_ZONE)); + return connection; + } + + private Connection useDataSource(String timeZoneId) throws SQLException { + String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); + JdbcDataSource dataSource = new JdbcDataSource(); + String address = "jdbc:es://" + elasticsearchAddress; + dataSource.setUrl(address); + Properties connectionProperties = connectionProperties(); + connectionProperties.put(JdbcConfiguration.TIME_ZONE, timeZoneId); + dataSource.setProperties(connectionProperties); + Connection connection = dataSource.getConnection(); + + assertNotNull("The timezone should be specified", connectionProperties.getProperty(JdbcConfiguration.TIME_ZONE)); + return connection; + } } diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java index 7621743481a4f..f5d559d9bf0b3 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java @@ -25,7 +25,8 @@ public void testSimpleExample() throws Exception { assertEquals("Don Quixote", results.getString(1)); assertEquals(1072, results.getInt(2)); SQLException e = expectThrows(SQLException.class, () -> results.getInt(1)); - assertTrue(e.getMessage(), e.getMessage().contains("unable to convert column 1 to an int")); + assertTrue(e.getMessage(), + e.getMessage().contains("Unable to convert value [Don Quixote] of type [VARCHAR] to an Integer")); assertFalse(results.next()); } // end::simple_example diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java index d61b4b9a946bd..b77820fc77e72 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java @@ -13,6 +13,7 @@ import java.sql.Connection; import java.sql.ResultSet; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Locale; @@ -24,7 +25,10 @@ public abstract class SqlSpecTestCase extends SpecBaseIntegrationTestCase { private String query; @ClassRule - public static LocalH2 H2 = new LocalH2((c) -> c.createStatement().execute("RUNSCRIPT 
FROM 'classpath:/setup_test_emp.sql'")); + public static LocalH2 H2 = new LocalH2((c) -> { + c.createStatement().execute("RUNSCRIPT FROM 'classpath:/setup_test_emp.sql'"); + c.createStatement().execute("RUNSCRIPT FROM 'classpath:/setup_test_emp_with_nulls.sql'"); + }); @ParametersFactory(argumentFormatting = PARAM_FORMATTING) public static List readScriptSpec() throws Exception { @@ -37,8 +41,8 @@ public static List readScriptSpec() throws Exception { tests.addAll(readScriptSpec("/agg.sql-spec", parser)); tests.addAll(readScriptSpec("/arithmetic.sql-spec", parser)); tests.addAll(readScriptSpec("/string-functions.sql-spec", parser)); - // AwaitsFix: https://github.com/elastic/elasticsearch/issues/32589 - // tests.addAll(readScriptSpec("/case-functions.sql-spec", parser)); + tests.addAll(readScriptSpec("/case-functions.sql-spec", parser)); + tests.addAll(readScriptSpec("/agg_nulls.sql-spec", parser)); return tests; } @@ -60,8 +64,11 @@ public SqlSpecTestCase(String fileName, String groupName, String testName, Integ @Override protected final void doTest() throws Throwable { - boolean goodLocale = !(Locale.getDefault().equals(new Locale.Builder().setLanguageTag("tr").build()) - || Locale.getDefault().equals(new Locale.Builder().setLanguageTag("tr-TR").build())); + // we skip the tests in case of these locales because ES-SQL is Locale-insensitive for now + // while H2 does take the Locale into consideration + String[] h2IncompatibleLocales = new String[] {"tr", "az", "tr-TR", "tr-CY", "az-Latn", "az-Cyrl", "az-Latn-AZ", "az-Cyrl-AZ"}; + boolean goodLocale = !Arrays.stream(h2IncompatibleLocales) + .anyMatch((l) -> Locale.getDefault().equals(new Locale.Builder().setLanguageTag(l).build())); if (fileName.startsWith("case-functions")) { Assume.assumeTrue(goodLocale); } diff --git a/x-pack/qa/sql/src/main/resources/agg.sql-spec b/x-pack/qa/sql/src/main/resources/agg.sql-spec index f42ce0ef7a092..f1ab9160b1af4 100644 --- a/x-pack/qa/sql/src/main/resources/agg.sql-spec +++ b/x-pack/qa/sql/src/main/resources/agg.sql-spec @@ -394,4 +394,12 @@ SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM aggMultiWithHavingOnCount SELECT MIN(salary) min, MAX(salary) max, gender g, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g HAVING c > 40 ORDER BY gender; aggMultiGroupByMultiWithHavingOnCount -SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g, languages HAVING c > 40 ORDER BY gender, languages; \ No newline at end of file +SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g, languages HAVING c > 40 ORDER BY gender, languages; + +// repetion of same aggs to check whether the generated query contains duplicates or not +aggRepeatFunctionAcrossFields +SELECT MIN(emp_no) AS a, 1 + MIN(emp_no) AS b, ABS(MIN(emp_no)) AS c FROM test_emp; +aggRepeatFunctionBetweenSelectAndHaving +SELECT gender, COUNT(DISTINCT languages) AS c FROM test_emp GROUP BY gender HAVING count(DISTINCT languages) > 0 ORDER BY gender; + + diff --git a/x-pack/qa/sql/src/main/resources/agg_nulls.sql-spec b/x-pack/qa/sql/src/main/resources/agg_nulls.sql-spec new file mode 100644 index 0000000000000..17fbb70a40bcb --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/agg_nulls.sql-spec @@ -0,0 +1,14 @@ +selectGenderWithNullsAndGroupByGender +SELECT gender, COUNT(*) count FROM test_emp_with_nulls GROUP BY gender ORDER BY gender; +selectFirstNameWithNullsAndGroupByFirstName 
+SELECT first_name FROM test_emp_with_nulls GROUP BY first_name ORDER BY first_name; +selectCountWhereIsNull +SELECT COUNT(*) count FROM test_emp_with_nulls WHERE first_name IS NULL; +selectLanguagesCountWithNullsAndGroupByLanguage +SELECT languages l, COUNT(*) c FROM test_emp_with_nulls GROUP BY languages ORDER BY languages; +selectHireDateGroupByHireDate +SELECT hire_date HD, COUNT(*) c FROM test_emp_with_nulls GROUP BY hire_date ORDER BY hire_date DESC; +selectHireDateGroupByHireDate +SELECT hire_date HD, COUNT(*) c FROM test_emp_with_nulls GROUP BY hire_date ORDER BY hire_date DESC; +selectSalaryGroupBySalary +SELECT salary, COUNT(*) c FROM test_emp_with_nulls GROUP BY salary ORDER BY salary DESC; \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/resources/alias.csv-spec b/x-pack/qa/sql/src/main/resources/alias.csv-spec index 839d2cba79451..f1fa900706a7d 100644 --- a/x-pack/qa/sql/src/main/resources/alias.csv-spec +++ b/x-pack/qa/sql/src/main/resources/alias.csv-spec @@ -86,6 +86,7 @@ test_alias | ALIAS test_alias_emp | ALIAS test_emp | BASE TABLE test_emp_copy | BASE TABLE +test_emp_with_nulls | BASE TABLE ; testGroupByOnAlias @@ -98,10 +99,10 @@ F | 10099.28 ; testGroupByOnPattern -SELECT gender, PERCENTILE(emp_no, 97) p1 FROM test_* GROUP BY gender; +SELECT gender, PERCENTILE(emp_no, 97) p1 FROM test_* WHERE gender is NOT NULL GROUP BY gender; gender:s | p1:d -F | 10099.28 -M | 10095.75 +F | 10099.32 +M | 10095.98 ; \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/resources/command.csv-spec b/x-pack/qa/sql/src/main/resources/command.csv-spec index 89e86e887e140..a8f23e27ffac0 100644 --- a/x-pack/qa/sql/src/main/resources/command.csv-spec +++ b/x-pack/qa/sql/src/main/resources/command.csv-spec @@ -162,3 +162,27 @@ last_name | VARCHAR last_name.keyword | VARCHAR salary | INTEGER ; + + +describeIncludeExclude +DESCRIBE "test_emp*,-test_alias*"; + +column:s | type:s +birth_date | TIMESTAMP +dep | STRUCT +dep.dep_id | VARCHAR +dep.dep_name | VARCHAR +dep.dep_name.keyword | VARCHAR +dep.from_date | TIMESTAMP +dep.to_date | TIMESTAMP +emp_no | INTEGER +first_name | VARCHAR +first_name.keyword | VARCHAR +gender | VARCHAR +hire_date | TIMESTAMP +languages | TINYINT +last_name | VARCHAR +last_name.keyword | VARCHAR +salary | INTEGER +; + diff --git a/x-pack/qa/sql/src/main/resources/employees_with_nulls.csv b/x-pack/qa/sql/src/main/resources/employees_with_nulls.csv new file mode 100644 index 0000000000000..482da640470d0 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/employees_with_nulls.csv @@ -0,0 +1,101 @@ +birth_date,emp_no,first_name,gender,hire_date,languages,last_name,salary +1953-09-02T00:00:00Z,10001,Georgi,,1986-06-26T00:00:00Z,2,Facello,57305 +1964-06-02T00:00:00Z,10002,Bezalel,,1985-11-21T00:00:00Z,5,Simmel,56371 +1959-12-03T00:00:00Z,10003,Parto,,1986-08-28T00:00:00Z,4,Bamford,61805 +1954-05-01T00:00:00Z,10004,Chirstian,,1986-12-01T00:00:00Z,5,Koblick,36174 +1955-01-21T00:00:00Z,10005,Kyoichi,,1989-09-12T00:00:00Z,1,Maliniak,63528 +1953-04-20T00:00:00Z,10006,Anneke,,1989-06-02T00:00:00Z,3,Preusig,60335 +1957-05-23T00:00:00Z,10007,Tzvetan,,1989-02-10T00:00:00Z,4,Zielinski,74572 +1958-02-19T00:00:00Z,10008,Saniya,,1994-09-15T00:00:00Z,2,Kalloufi,43906 +1952-04-19T00:00:00Z,10009,Sumant,,1985-02-18T00:00:00Z,1,Peac,66174 +1963-06-01T00:00:00Z,10010,Duangkaew,,1989-08-24T00:00:00Z,4,Piveteau,45797 +1953-11-07T00:00:00Z,10011,Mary,F,1990-01-22T00:00:00Z,5,Sluis,31120 +1960-10-04T00:00:00Z,10012,Patricio,M,1992-12-18T00:00:00Z,5,Bridgland,48942 
+1963-06-07T00:00:00Z,10013,Eberhardt,M,1985-10-20T00:00:00Z,1,Terkki,48735 +1956-02-12T00:00:00Z,10014,Berni,M,1987-03-11T00:00:00Z,5,Genin,37137 +1959-08-19T00:00:00Z,10015,Guoxiang,M,1987-07-02T00:00:00Z,5,Nooteboom,25324 +1961-05-02T00:00:00Z,10016,Kazuhito,M,1995-01-27T00:00:00Z,2,Cappelletti,61358 +1958-07-06T00:00:00Z,10017,Cristinel,F,1993-08-03T00:00:00Z,2,Bouloucos,58715 +1954-06-19T00:00:00Z,10018,Kazuhide,F,1993-08-03T00:00:00Z,2,Peha,56760 +1953-01-23T00:00:00Z,10019,Lillian,M,1993-08-03T00:00:00Z,1,Haddadi,73717 +1952-12-24T00:00:00Z,10020,,M,1991-01-26T00:00:00Z,3,Warwick,40031 +1960-02-20T00:00:00Z,10021,,M,1989-12-17T00:00:00Z,5,Erde,60408 +1952-07-08T00:00:00Z,10022,,M,1995-08-22T00:00:00Z,3,Famili,48233 +1953-09-29T00:00:00Z,10023,,F,1989-12-17T00:00:00Z,2,Montemayor,47896 +1958-09-05T00:00:00Z,10024,,F,1997-05-19T00:00:00Z,3,Pettey,64675 +1958-10-31T00:00:00Z,10025,Prasadram,M,1987-08-17T00:00:00Z,5,Heyers,47411 +1953-04-03T00:00:00Z,10026,Yongqiao,M,1995-03-20T00:00:00Z,3,Berztiss,28336 +1962-07-10T00:00:00Z,10027,Divier,F,1989-07-07T00:00:00Z,5,Reistad,73851 +1963-11-26T00:00:00Z,10028,Domenick,M,1991-10-22T00:00:00Z,1,Tempesti,39356 +1956-12-13T00:00:00Z,10029,Otmar,M,1985-11-20T00:00:00Z,,Herbst,74999 +1958-07-14T00:00:00Z,10030,Elvis,M,1994-02-17T00:00:00Z,,Demeyer,67492 +1959-01-27T00:00:00Z,10031,Karsten,M,1994-02-17T00:00:00Z,,Joslin,37716 +1960-08-09T00:00:00Z,10032,Jeong,F,1990-06-20T00:00:00Z,,Reistad,62233 +1956-11-14T00:00:00Z,10033,Arif,M,1987-03-18T00:00:00Z,,Merlo,70011 +1962-12-29T00:00:00Z,10034,Bader,M,1988-09-05T00:00:00Z,,Swan,39878 +1953-02-08T00:00:00Z,10035,Alain,M,1988-09-05T00:00:00Z,,Chappelet,25945 +1959-08-10T00:00:00Z,10036,Adamantios,M,1992-01-03T00:00:00Z,,Portugali,60781 +1963-07-22T00:00:00Z,10037,Pradeep,M,1990-12-05T00:00:00Z,,Makrucki,37691 +1960-07-20T00:00:00Z,10038,Huan,M,1989-09-20T00:00:00Z,,Lortz,35222 +1959-10-01T00:00:00Z,10039,Alejandro,M,1988-01-19T00:00:00Z,,Brender,36051 +1959-09-13T00:00:00Z,10040,Weiyi,F,1993-02-14T00:00:00Z,,Meriste,37112 +1959-08-27T00:00:00Z,10041,Uri,F,1989-11-12T00:00:00Z,1,Lenart,56415 +1956-02-26T00:00:00Z,10042,Magy,F,1993-03-21T00:00:00Z,3,Stamatiou,30404 +1960-09-19T00:00:00Z,10043,Yishay,M,1990-10-20T00:00:00Z,1,Tzvieli,34341 +1961-09-21T00:00:00Z,10044,Mingsen,F,1994-05-21T00:00:00Z,1,Casley,39728 +1957-08-14T00:00:00Z,10045,Moss,M,1989-09-02T00:00:00Z,3,Shanbhogue,74970 +1960-07-23T00:00:00Z,10046,Lucien,M,1992-06-20T00:00:00Z,4,Rosenbaum,50064 +1952-06-29T00:00:00Z,10047,Zvonko,M,1989-03-31T00:00:00Z,4,Nyanchama,42716 +1963-07-11T00:00:00Z,10048,Florian,M,1985-02-24T00:00:00Z,3,Syrotiuk,26436 +1961-04-24T00:00:00Z,10049,Basil,F,1992-05-04T00:00:00Z,5,Tramer,37853 +1958-05-21T00:00:00Z,10050,Yinghua,M,1990-12-25T00:00:00Z,2,Dredge,43026 +1953-07-28T00:00:00Z,10051,Hidefumi,M,1992-10-15T00:00:00Z,3,Caine,58121 +1961-02-26T00:00:00Z,10052,Heping,M,1988-05-21T00:00:00Z,1,Nitsch,55360 +1954-09-13T00:00:00Z,10053,Sanjiv,F,1986-02-04T00:00:00Z,3,Zschoche,54462 +1957-04-04T00:00:00Z,10054,Mayumi,M,1995-03-13T00:00:00Z,4,Schueller,65367 +1956-06-06T00:00:00Z,10055,Georgy,M,1992-04-27T00:00:00Z,5,Dredge,49281 +1961-09-01T00:00:00Z,10056,Brendon,F,1990-02-01T00:00:00Z,2,Bernini,33370 +1954-05-30T00:00:00Z,10057,Ebbe,F,1992-01-15T00:00:00Z,4,Callaway,27215 +1954-10-01T00:00:00Z,10058,Berhard,M,1987-04-13T00:00:00Z,3,McFarlin,38376 +1953-09-19T00:00:00Z,10059,Alejandro,F,1991-06-26T00:00:00Z,2,McAlpine,44307 +1961-10-15T00:00:00Z,10060,Breannda,M,1987-11-02T00:00:00Z,2,Billingsley,29175 
+1962-10-19T00:00:00Z,10061,Tse,M,1985-09-17T00:00:00Z,1,Herber,49095 +1961-11-02T00:00:00Z,10062,Anoosh,M,1991-08-30T00:00:00Z,3,Peyn,65030 +1952-08-06T00:00:00Z,10063,Gino,F,1989-04-08T00:00:00Z,3,Leonhardt,52121 +1959-04-07T00:00:00Z,10064,Udi,M,1985-11-20T00:00:00Z,5,Jansch,33956 +1963-04-14T00:00:00Z,10065,Satosi,M,1988-05-18T00:00:00Z,2,Awdeh,50249 +1952-11-13T00:00:00Z,10066,Kwee,M,1986-02-26T00:00:00Z,5,Schusler,31897 +1953-01-07T00:00:00Z,10067,Claudi,M,1987-03-04T00:00:00Z,2,Stavenow,52044 +1962-11-26T00:00:00Z,10068,Charlene,M,1987-08-07T00:00:00Z,3,Brattka,28941 +1960-09-06T00:00:00Z,10069,Margareta,F,1989-11-05T00:00:00Z,5,Bierman,41933 +1955-08-20T00:00:00Z,10070,Reuven,M,1985-10-14T00:00:00Z,3,Garigliano,54329 +1958-01-21T00:00:00Z,10071,Hisao,M,1987-10-01T00:00:00Z,2,Lipner,40612 +1952-05-15T00:00:00Z,10072,Hironoby,F,1988-07-21T00:00:00Z,5,Sidou,54518 +1954-02-23T00:00:00Z,10073,Shir,M,1991-12-01T00:00:00Z,4,McClurg,32568 +1955-08-28T00:00:00Z,10074,Mokhtar,F,1990-08-13T00:00:00Z,5,Bernatsky,38992 +1960-03-09T00:00:00Z,10075,Gao,F,1987-03-19T00:00:00Z,5,Dolinsky,51956 +1952-06-13T00:00:00Z,10076,Erez,F,1985-07-09T00:00:00Z,3,Ritzmann,62405 +1964-04-18T00:00:00Z,10077,Mona,M,1990-03-02T00:00:00Z,5,Azuma,46595 +1959-12-25T00:00:00Z,10078,Danel,F,1987-05-26T00:00:00Z,2,Mondadori,69904 +1961-10-05T00:00:00Z,10079,Kshitij,F,1986-03-27T00:00:00Z,2,Gils,32263 +1957-12-03T00:00:00Z,10080,Premal,M,1985-11-19T00:00:00Z,5,Baek,52833 +1960-12-17T00:00:00Z,10081,Zhongwei,M,1986-10-30T00:00:00Z,2,Rosen,50128 +1963-09-09T00:00:00Z,10082,Parviz,M,1990-01-03T00:00:00Z,4,Lortz,49818 +1959-07-23T00:00:00Z,10083,Vishv,M,1987-03-31T00:00:00Z,1,Zockler, +1960-05-25T00:00:00Z,10084,Tuval,M,1995-12-15T00:00:00Z,1,Kalloufi, +1962-11-07T00:00:00Z,10085,Kenroku,M,1994-04-09T00:00:00Z,5,Malabarba, +1962-11-19T00:00:00Z,10086,Somnath,M,1990-02-16T00:00:00Z,1,Foote, +1959-07-23T00:00:00Z,10087,Xinglin,F,1986-09-08T00:00:00Z,5,Eugenio, +1954-02-25T00:00:00Z,10088,Jungsoon,F,1988-09-02T00:00:00Z,5,Syrzycki, +1963-03-21T00:00:00Z,10089,Sudharsan,F,1986-08-12T00:00:00Z,4,Flasterstein, +1961-05-30T00:00:00Z,10090,Kendra,M,1986-03-14T00:00:00Z,2,Hofting,44956 +1955-10-04T00:00:00Z,10091,Amabile,M,1992-11-18T00:00:00Z,3,Gomatam,38645 +1964-10-18T00:00:00Z,10092,Valdiodio,F,1989-09-22T00:00:00Z,1,Niizuma,25976 +1964-06-11T00:00:00Z,10093,Sailaja,M,1996-11-05T00:00:00Z,3,Desikan,45656 +1957-05-25T00:00:00Z,10094,Arumugam,F,1987-04-18T00:00:00Z,5,Ossenbruggen,66817 +1965-01-03T00:00:00Z,10095,Hilari,M,1986-07-15T00:00:00Z,4,Morton,37702 +1954-09-16T00:00:00Z,10096,Jayson,M,1990-01-14T00:00:00Z,4,Mandell,43889 +1952-02-27T00:00:00Z,10097,Remzi,M,1990-09-15T00:00:00Z,3,Waschkowski,71165 +1961-09-23T00:00:00Z,10098,Sreekrishna,F,1985-05-13T00:00:00Z,4,Servieres,44817 +1956-05-25T00:00:00Z,10099,Valter,F,1988-10-18T00:00:00Z,2,Sullins,73578 +1953-04-21T00:00:00Z,10100,Hironobu,F,1987-09-21T00:00:00Z,4,Haraldson,68431 diff --git a/x-pack/qa/sql/src/main/resources/setup_test_emp_with_nulls.sql b/x-pack/qa/sql/src/main/resources/setup_test_emp_with_nulls.sql new file mode 100644 index 0000000000000..c6afaa9018aa1 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/setup_test_emp_with_nulls.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS "test_emp_with_nulls"; +CREATE TABLE "test_emp_with_nulls" ( + "birth_date" TIMESTAMP WITH TIME ZONE, + "emp_no" INT, + "first_name" VARCHAR(50), + "gender" VARCHAR(1), + "hire_date" TIMESTAMP WITH TIME ZONE, + "languages" TINYINT, + "last_name" VARCHAR(50), + "salary" INT + ) + AS SELECT * 
FROM CSVREAD('classpath:/employees_with_nulls.csv'); \ No newline at end of file diff --git a/x-pack/qa/third-party/hipchat/build.gradle b/x-pack/qa/third-party/hipchat/build.gradle index 03b6c31969844..2b2ee7fcbbf87 100644 --- a/x-pack/qa/third-party/hipchat/build.gradle +++ b/x-pack/qa/third-party/hipchat/build.gradle @@ -4,7 +4,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('watcher'), configuration: 'runtime') } diff --git a/x-pack/qa/third-party/jira/build.gradle b/x-pack/qa/third-party/jira/build.gradle index 3814c8e9a5382..283f9688699bd 100644 --- a/x-pack/qa/third-party/jira/build.gradle +++ b/x-pack/qa/third-party/jira/build.gradle @@ -7,7 +7,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('watcher'), configuration: 'runtime') } diff --git a/x-pack/qa/third-party/pagerduty/build.gradle b/x-pack/qa/third-party/pagerduty/build.gradle index c0f337e160e0a..12758989d0f21 100644 --- a/x-pack/qa/third-party/pagerduty/build.gradle +++ b/x-pack/qa/third-party/pagerduty/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('watcher'), configuration: 'runtime') } diff --git a/x-pack/qa/third-party/slack/build.gradle b/x-pack/qa/third-party/slack/build.gradle index 431752765f3a0..f1bcd98cff694 100644 --- a/x-pack/qa/third-party/slack/build.gradle +++ b/x-pack/qa/third-party/slack/build.gradle @@ -5,7 +5,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('watcher'), configuration: 'runtime') } diff --git a/x-pack/qa/transport-client-tests/build.gradle b/x-pack/qa/transport-client-tests/build.gradle index a94ad8fd59267..3ece6dd1147c4 100644 --- a/x-pack/qa/transport-client-tests/build.gradle +++ b/x-pack/qa/transport-client-tests/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') } diff --git a/x-pack/test/feature-aware/build.gradle b/x-pack/test/feature-aware/build.gradle index f6a1f6cb16f2a..11b0e67183c8f 100644 --- a/x-pack/test/feature-aware/build.gradle +++ b/x-pack/test/feature-aware/build.gradle @@ -3,7 +3,7 @@ apply plugin: 'elasticsearch.build' dependencies { compile 'org.ow2.asm:asm:6.2' compile "org.elasticsearch:elasticsearch:${version}" - compile project(path: xpackModule('core'), configuration: 'shadow') + compile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile "org.elasticsearch.test:framework:${version}" 
} diff --git a/x-pack/transport-client/build.gradle b/x-pack/transport-client/build.gradle index 7155dad5ee60d..a96f4146fbf67 100644 --- a/x-pack/transport-client/build.gradle +++ b/x-pack/transport-client/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -10,7 +8,7 @@ archivesBaseName = 'x-pack-transport' dependencies { // this "api" dependency looks weird, but it is correct, as it contains // all of x-pack for now, and transport client will be going away in the future. - compile project(path: xpackModule('core'), configuration: 'shadow') + compile "org.elasticsearch.plugin:x-pack-core:${version}" compile "org.elasticsearch.client:transport:${version}" testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" @@ -22,8 +20,7 @@ dependencyLicenses.enabled = false forbiddenApisTest { // we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to // be pulled in - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), - PrecommitTasks.getResource('/forbidden/es-all-signatures.txt')] + replaceSignatureFiles 'jdk-signatures', 'es-all-signatures' } namingConventions {