diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c591459f01bb7..4285c8fd20c08 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -325,21 +325,19 @@ common configurations in our build and how we use them:
`compile`
Code that is on the classpath at both compile and -runtime. If the [`shadow`][shadow-plugin] plugin is applied to the project then -this code is bundled into the jar produced by the project.
+runtime.
`runtime`
Code that is not on the classpath at compile time but is on the classpath at runtime. We mostly use this configuration to make sure that we do not accidentally compile against dependencies of our dependencies, also known as "transitive" dependencies.
-
`compileOnly`
Code that is on the classpath at comile time but that +
`compileOnly`
Code that is on the classpath at compile time but that should not be shipped with the project because it is "provided" by the runtime somehow. Elasticsearch plugins use this configuration to include dependencies that are bundled with Elasticsearch's server.
-
`shadow`
Only available in projects with the shadow plugin. Code -that is on the classpath at both compile and runtime but it *not* bundled into -the jar produced by the project. If you depend on a project with the `shadow` -plugin then you need to depend on this configuration because it will bring -along all of the dependencies you need at runtime.
+
`bundle`
Only available in projects with the shadow plugin. +Dependencies with this configuration are bundled into the jar produced by the +build. Since IDEs do not understand this configuration, we rig them to treat +dependencies in this configuration as `compile` dependencies (see the sketch after this list).
`testCompile`
Code that is on the classpath for compiling tests that are part of this project but not production code. The canonical example of this is `junit`.
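To make the configurations above concrete, here is a minimal sketch of a `dependencies` block for a project that applies the shadow plugin. The artifact coordinates and the subproject path are hypothetical; only the configuration names (`compile`, `runtime`, `compileOnly`, `bundle`, `testCompile`) and the `${version}` property come from the build described here:

```groovy
dependencies {
  // Compile and runtime classpath; published as a normal dependency
  // in the pom. (hypothetical coordinates)
  compile "org.example:some-library:1.0"

  // Runtime classpath only, so we cannot accidentally compile against
  // its classes. (hypothetical coordinates)
  runtime "org.example:some-runtime-helper:1.0"

  // Provided by the Elasticsearch server at runtime, so not shipped
  // with the project.
  compileOnly "org.elasticsearch:elasticsearch:${version}"

  // Bundled into the jar produced by the shadow plugin instead of
  // being declared as an external dependency. (hypothetical path)
  bundle project(':some-subproject')

  // Test classpath only.
  testCompile "junit:junit:4.12"
}
```

Note that `bundle` only exists once the shadow plugin is applied, which is why the build rigs IDEs to treat it as `compile`.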
diff --git a/build.gradle b/build.gradle index 0df5b97ae4a26..36d3a543d89b6 100644 --- a/build.gradle +++ b/build.gradle @@ -22,6 +22,7 @@ import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionCollection import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.gradle.plugins.ide.eclipse.model.SourceFolder import org.gradle.util.GradleVersion import org.gradle.util.DistributionLocator @@ -304,7 +305,7 @@ subprojects { // org.elasticsearch:elasticsearch must be the last one or all the links for the // other packages (e.g org.elasticsearch.client) will point to server rather than // their own artifacts. - if (project.plugins.hasPlugin(BuildPlugin)) { + if (project.plugins.hasPlugin(BuildPlugin) || project.plugins.hasPlugin(PluginBuildPlugin)) { String artifactsHost = VersionProperties.elasticsearch.isSnapshot() ? "https://snapshots.elastic.co" : "https://artifacts.elastic.co" Closure sortClosure = { a, b -> b.group <=> a.group } Closure depJavadocClosure = { shadowed, dep -> @@ -322,13 +323,6 @@ subprojects { */ project.evaluationDependsOn(upstreamProject.path) project.javadoc.source += upstreamProject.javadoc.source - /* - * Do not add those projects to the javadoc classpath because - * we are going to resolve them with their source instead. - */ - project.javadoc.classpath = project.javadoc.classpath.filter { f -> - false == upstreamProject.configurations.archives.artifacts.files.files.contains(f) - } /* * Instead we need the upstream project's javadoc classpath so * we don't barf on the classes that it references. @@ -345,16 +339,16 @@ subprojects { project.configurations.compile.dependencies .findAll() .toSorted(sortClosure) - .each({ c -> depJavadocClosure(hasShadow, c) }) + .each({ c -> depJavadocClosure(false, c) }) project.configurations.compileOnly.dependencies .findAll() .toSorted(sortClosure) - .each({ c -> depJavadocClosure(hasShadow, c) }) + .each({ c -> depJavadocClosure(false, c) }) if (hasShadow) { - project.configurations.shadow.dependencies + project.configurations.bundle.dependencies .findAll() .toSorted(sortClosure) - .each({ c -> depJavadocClosure(false, c) }) + .each({ c -> depJavadocClosure(true, c) }) } } } @@ -523,25 +517,18 @@ allprojects { allprojects { /* * IntelliJ and Eclipse don't know about the shadow plugin so when we're - * in "IntelliJ mode" or "Eclipse mode" add "runtime" dependencies - * eveywhere where we see a "shadow" dependency which will cause them to - * reference shadowed projects directly rather than rely on the shadowing - * to include them. This is the correct thing for it to do because it - * doesn't run the jar shadowing at all. This isn't needed for the project + * in "IntelliJ mode" or "Eclipse mode" switch "bundle" dependencies into + * regular "compile" dependencies. This isn't needed for the project * itself because the IDE configuration is done by SourceSets but it + * *is* needed for projects that depend on the project doing the shadowing. + * Without this they won't properly depend on the shadowed project. 
*/ if (isEclipse || isIdea) { - configurations.all { Configuration configuration -> - dependencies.all { Dependency dep -> - if (dep instanceof ProjectDependency) { - if (dep.getTargetConfiguration() == 'shadow') { - configuration.dependencies.add(project.dependencies.project(path: dep.dependencyProject.path, configuration: 'runtime')) - } - } - } - } + project.plugins.withType(ShadowPlugin).whenPluginAdded { + project.afterEvaluate { + project.configurations.compile.extendsFrom project.configurations.bundle + } + } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index e45ba7ce9dc14..fb979a77dacea 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -79,8 +79,9 @@ class BuildPlugin implements Plugin { } project.pluginManager.apply('java') project.pluginManager.apply('carrotsearch.randomized-testing') - // these plugins add lots of info to our jars + configureConfigurations(project) configureJars(project) // jar config must be added before info broker + // these plugins add lots of info to our jars project.pluginManager.apply('nebula.info-broker') project.pluginManager.apply('nebula.info-basic') project.pluginManager.apply('nebula.info-java') @@ -91,8 +92,8 @@ class BuildPlugin implements Plugin { globalBuildInfo(project) configureRepositories(project) - configureConfigurations(project) project.ext.versions = VersionProperties.versions + configureSourceSets(project) configureCompile(project) configureJavadoc(project) configureSourcesJar(project) @@ -421,8 +422,10 @@ class BuildPlugin implements Plugin { project.configurations.compile.dependencies.all(disableTransitiveDeps) project.configurations.testCompile.dependencies.all(disableTransitiveDeps) project.configurations.compileOnly.dependencies.all(disableTransitiveDeps) + project.plugins.withType(ShadowPlugin).whenPluginAdded { - project.configurations.shadow.dependencies.all(disableTransitiveDeps) + Configuration bundle = project.configurations.create('bundle') + bundle.dependencies.all(disableTransitiveDeps) } } @@ -528,12 +531,16 @@ class BuildPlugin implements Plugin { project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask -> // The GenerateMavenPom task is aggressive about setting the destination, instead of fighting it, // just make a copy. - generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-${project.version}.pom" + generatePOMTask.ext.pomFileName = null doLast { project.copy { from generatePOMTask.destination into "${project.buildDir}/distributions" - rename { generatePOMTask.ext.pomFileName } + rename { + generatePOMTask.ext.pomFileName == null ? + "${project.archivesBaseName}-${project.version}.pom" : + generatePOMTask.ext.pomFileName + } } } // build poms with assemble (if the assemble task exists) @@ -556,30 +563,6 @@ class BuildPlugin implements Plugin { publications { nebula(MavenPublication) { artifacts = [ project.tasks.shadowJar ] - artifactId = project.archivesBaseName - /* - * Configure the pom to include the "shadow" as compile dependencies - * because that is how we're using them but remove all other dependencies - * because they've been shaded into the jar. 
- */ - pom.withXml { XmlProvider xml -> - Node root = xml.asNode() - root.remove(root.dependencies) - Node dependenciesNode = root.appendNode('dependencies') - project.configurations.shadow.allDependencies.each { - if (false == it instanceof SelfResolvingDependency) { - Node dependencyNode = dependenciesNode.appendNode('dependency') - dependencyNode.appendNode('groupId', it.group) - dependencyNode.appendNode('artifactId', it.name) - dependencyNode.appendNode('version', it.version) - dependencyNode.appendNode('scope', 'compile') - } - } - // Be tidy and remove the element if it is empty - if (dependenciesNode.children.empty) { - root.remove(dependenciesNode) - } - } } } } @@ -587,6 +570,20 @@ class BuildPlugin implements Plugin { } } + /** + * Add dependencies that we are going to bundle to the compile classpath. + */ + static void configureSourceSets(Project project) { + project.plugins.withType(ShadowPlugin).whenPluginAdded { + ['main', 'test'].each {name -> + SourceSet sourceSet = project.sourceSets.findByName(name) + if (sourceSet != null) { + sourceSet.compileClasspath += project.configurations.bundle + } + } + } + } + /** Adds compiler settings to the project */ static void configureCompile(Project project) { if (project.compilerJavaVersion < JavaVersion.VERSION_1_10) { @@ -764,9 +761,16 @@ class BuildPlugin implements Plugin { * better to be safe */ mergeServiceFiles() + /* + * Bundle dependencies of the "bundled" configuration. + */ + configurations = [project.configurations.bundle] } // Make sure we assemble the shadow jar project.tasks.assemble.dependsOn project.tasks.shadowJar + project.artifacts { + apiElements project.tasks.shadowJar + } } } @@ -873,13 +877,8 @@ class BuildPlugin implements Plugin { exclude '**/*$*.class' project.plugins.withType(ShadowPlugin).whenPluginAdded { - /* - * If we make a shaded jar we test against it. - */ + // Test against a shadow jar if we made one classpath -= project.tasks.compileJava.outputs.files - classpath -= project.configurations.compile - classpath -= project.configurations.runtime - classpath += project.configurations.shadow classpath += project.tasks.shadowJar.outputs.files dependsOn project.tasks.shadowJar } @@ -905,26 +904,6 @@ class BuildPlugin implements Plugin { additionalTest.dependsOn(project.tasks.testClasses) project.check.dependsOn(additionalTest) }); - - project.plugins.withType(ShadowPlugin).whenPluginAdded { - /* - * We need somewhere to configure dependencies that we don't wish - * to shade into the jar. The shadow plugin creates a "shadow" - * configuration which is *almost* exactly that. It is never - * bundled into the shaded jar but is used for main source - * compilation. Unfortunately, by default it is not used for - * *test* source compilation and isn't used in tests at all. This - * change makes it available for test compilation. - * - * Note that this isn't going to work properly with qa projects - * but they have no business applying the shadow plugin in the - * firstplace. 
- */ - SourceSet testSourceSet = project.sourceSets.findByName('test') - if (testSourceSet != null) { - testSourceSet.compileClasspath += project.configurations.shadow - } - } } private static configurePrecommit(Project project) { @@ -936,7 +915,7 @@ class BuildPlugin implements Plugin { it.group.startsWith('org.elasticsearch') == false } - project.configurations.compileOnly project.plugins.withType(ShadowPlugin).whenPluginAdded { - project.dependencyLicenses.dependencies += project.configurations.shadow.fileCollection { + project.dependencyLicenses.dependencies += project.configurations.bundle.fileCollection { it.group.startsWith('org.elasticsearch') == false } } @@ -947,7 +926,7 @@ class BuildPlugin implements Plugin { deps.runtimeConfiguration = project.configurations.runtime project.plugins.withType(ShadowPlugin).whenPluginAdded { deps.runtimeConfiguration = project.configurations.create('infoDeps') - deps.runtimeConfiguration.extendsFrom(project.configurations.runtime, project.configurations.shadow) + deps.runtimeConfiguration.extendsFrom(project.configurations.runtime, project.configurations.bundle) } deps.compileOnlyConfiguration = project.configurations.compileOnly project.afterEvaluate { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy index 7d5b793254fe4..daab0efc8c69a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy @@ -138,9 +138,8 @@ class VersionCollection { break } } - // caveat 0 - now dip back 2 versions to get the last supported snapshot version of the line - Version highestMinor = getHighestPreviousMinor(currentVersion.major - 1) - maintenanceBugfixSnapshot = replaceAsSnapshot(highestMinor) + // caveat 0 - the last supported snapshot of the line is on a version that we don't support (N-2) + maintenanceBugfixSnapshot = null } else { // caveat 3 did not apply. version is not a X.0.0, so we are somewhere on a X.Y line // only check till minor == 0 of the major @@ -293,7 +292,8 @@ class VersionCollection { * If you have a list [5.0.2, 5.1.2, 6.0.1, 6.1.1] and pass in 6 for the nextMajorVersion, it will return you 5.1.2 */ private Version getHighestPreviousMinor(Integer nextMajorVersion) { - return versionSet.headSet(Version.fromString("${nextMajorVersion}.0.0")).last() + SortedSet result = versionSet.headSet(Version.fromString("${nextMajorVersion}.0.0")) + return result.isEmpty() ? null : result.last() } /** diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 5216f08427428..a14a3a680da1c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -157,11 +157,10 @@ public class PluginBuildPlugin extends BuildPlugin { from pluginMetadata // metadata (eg custom security policy) /* * If the plugin is using the shadow plugin then we need to bundle - * "shadow" things rather than the default jar and dependencies so - * we don't hit jar hell. + * that shadow jar. */ from { project.plugins.hasPlugin(ShadowPlugin) ? project.shadowJar : project.jar } - from { project.plugins.hasPlugin(ShadowPlugin) ? 
project.configurations.shadow : project.configurations.runtime - project.configurations.compileOnly } + from project.configurations.runtime - project.configurations.compileOnly // extra files for the plugin to go into the zip from('src/main/packaging') // TODO: move all config/bin/_size/etc into packaging from('src/main') { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy index 656d5e0d35a9e..4299efd95a383 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy @@ -19,6 +19,7 @@ package org.elasticsearch.gradle.precommit +import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import org.elasticsearch.gradle.LoggedExec import org.gradle.api.file.FileCollection import org.gradle.api.tasks.OutputFile @@ -39,6 +40,9 @@ public class JarHellTask extends LoggedExec { public JarHellTask() { project.afterEvaluate { FileCollection classpath = project.sourceSets.test.runtimeClasspath + if (project.plugins.hasPlugin(ShadowPlugin)) { + classpath += project.configurations.bundle + } inputs.files(classpath) dependsOn(classpath) description = "Runs CheckJarHell on ${classpath}" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 42dc29df058c6..b63b1f40d8049 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -18,18 +18,12 @@ */ package org.elasticsearch.gradle.precommit -import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis -import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask -import org.gradle.api.JavaVersion import org.gradle.api.Project import org.gradle.api.Task -import org.gradle.api.file.FileCollection +import org.gradle.api.artifacts.Configuration import org.gradle.api.plugins.JavaBasePlugin import org.gradle.api.plugins.quality.Checkstyle -import org.gradle.api.tasks.JavaExec -import org.gradle.api.tasks.StopExecutionException - /** * Validation tasks which should be run before committing. These run before tests. */ @@ -38,8 +32,8 @@ class PrecommitTasks { /** Adds a precommit task, which depends on non-test verification tasks. 
*/ public static Task create(Project project, boolean includeDependencyLicenses) { List precommitTasks = [ - configureForbiddenApis(project), configureCheckstyle(project), + configureForbiddenApisCli(project), configureNamingConventions(project), project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), project.tasks.create('licenseHeaders', LicenseHeadersTask.class), @@ -48,9 +42,6 @@ class PrecommitTasks { project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class) ] - // Configure it but don't add it as a dependency yet - configureForbiddenApisCli(project) - // tasks with just tests don't need dependency licenses, so this flag makes adding // the task optional if (includeDependencyLicenses) { @@ -84,77 +75,60 @@ class PrecommitTasks { return project.tasks.create(precommitOptions) } - private static Task configureForbiddenApis(Project project) { - project.pluginManager.apply(ForbiddenApisPlugin.class) - project.forbiddenApis { - failOnUnsupportedJava = false - bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-non-portable', 'jdk-system-out'] - signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'), - getClass().getResource('/forbidden/es-all-signatures.txt')] - suppressAnnotations = ['**.SuppressForbidden'] - } - project.tasks.withType(CheckForbiddenApis) { - // we do not use the += operator to add signatures, as conventionMappings of Gradle do not work when it's configured using withType: - if (name.endsWith('Test')) { - signaturesURLs = project.forbiddenApis.signaturesURLs + - [ getClass().getResource('/forbidden/es-test-signatures.txt'), getClass().getResource('/forbidden/http-signatures.txt') ] - } else { - signaturesURLs = project.forbiddenApis.signaturesURLs + - [ getClass().getResource('/forbidden/es-server-signatures.txt') ] - } - // forbidden apis doesn't support Java 11, so stop at 10 - String targetMajorVersion = (project.compilerJavaVersion.compareTo(JavaVersion.VERSION_1_10) > 0 ? 
- JavaVersion.VERSION_1_10 : - project.compilerJavaVersion).getMajorVersion() - targetCompatibility = Integer.parseInt(targetMajorVersion) >= 9 ?targetMajorVersion : "1.${targetMajorVersion}" - } - Task forbiddenApis = project.tasks.findByName('forbiddenApis') - forbiddenApis.group = "" // clear group, so this does not show up under verification tasks - - return forbiddenApis - } - private static Task configureForbiddenApisCli(Project project) { - project.configurations.create("forbiddenApisCliJar") + Configuration forbiddenApisConfiguration = project.configurations.create("forbiddenApisCliJar") project.dependencies { - forbiddenApisCliJar 'de.thetaphi:forbiddenapis:2.5' + forbiddenApisCliJar ('de.thetaphi:forbiddenapis:2.5') } - Task forbiddenApisCli = project.tasks.create('forbiddenApisCli') + Task forbiddenApisCli = project.tasks.create('forbiddenApis') project.sourceSets.forEach { sourceSet -> forbiddenApisCli.dependsOn( - project.tasks.create(sourceSet.getTaskName('forbiddenApisCli', null), JavaExec) { + project.tasks.create(sourceSet.getTaskName('forbiddenApis', null), ForbiddenApisCliTask) { ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources') dependsOn(buildResources) - classpath = project.files( - project.configurations.forbiddenApisCliJar, + execAction = { spec -> + spec.classpath = project.files( + project.configurations.forbiddenApisCliJar, + sourceSet.compileClasspath, + sourceSet.runtimeClasspath + ) + spec.executable = "${project.runtimeJavaHome}/bin/java" + } + inputs.files( + forbiddenApisConfiguration, sourceSet.compileClasspath, sourceSet.runtimeClasspath ) - main = 'de.thetaphi.forbiddenapis.cli.CliMain' - executable = "${project.runtimeJavaHome}/bin/java" - args "-b", 'jdk-unsafe-1.8' - args "-b", 'jdk-deprecated-1.8' - args "-b", 'jdk-non-portable' - args "-b", 'jdk-system-out' - args "-f", buildResources.copy("forbidden/jdk-signatures.txt") - args "-f", buildResources.copy("forbidden/es-all-signatures.txt") - args "--suppressannotation", '**.SuppressForbidden' + + targetCompatibility = project.compilerJavaVersion + bundledSignatures = [ + "jdk-unsafe", "jdk-deprecated", "jdk-non-portable", "jdk-system-out" + ] + signaturesFiles = project.files( + buildResources.copy("forbidden/jdk-signatures.txt"), + buildResources.copy("forbidden/es-all-signatures.txt") + ) + suppressAnnotations = ['**.SuppressForbidden'] if (sourceSet.name == 'test') { - args "-f", buildResources.copy("forbidden/es-test-signatures.txt") - args "-f", buildResources.copy("forbidden/http-signatures.txt") + signaturesFiles += project.files( + buildResources.copy("forbidden/es-test-signatures.txt"), + buildResources.copy("forbidden/http-signatures.txt") + ) } else { - args "-f", buildResources.copy("forbidden/es-server-signatures.txt") + signaturesFiles += project.files(buildResources.copy("forbidden/es-server-signatures.txt")) } dependsOn sourceSet.classesTaskName - doFirst { - // Forbidden APIs expects only existing dirs, and requires at least one - FileCollection existingOutputs = sourceSet.output.classesDirs - .filter { it.exists() } - if (existingOutputs.isEmpty()) { - throw new StopExecutionException("${sourceSet.name} has no outputs") - } - existingOutputs.forEach { args "-d", it } + classesDirs = sourceSet.output.classesDirs + ext.replaceSignatureFiles = { String... names -> + signaturesFiles = project.files( + names.collect { buildResources.copy("forbidden/${it}.txt") } + ) + } + ext.addSignatureFiles = { String... 
names -> + signaturesFiles += project.files( + names.collect { buildResources.copy("forbidden/${it}.txt") } + ) } } ) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy index d6babbbfbb8b2..52b13a5664427 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.precommit; +import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import org.apache.tools.ant.BuildEvent; import org.apache.tools.ant.BuildException; import org.apache.tools.ant.BuildListener; @@ -82,6 +83,11 @@ public class ThirdPartyAuditTask extends AntTask { configuration = project.configurations.findByName('testCompile') } assert configuration != null + if (project.plugins.hasPlugin(ShadowPlugin)) { + Configuration original = configuration + configuration = project.configurations.create('thirdPartyAudit') + configuration.extendsFrom(original, project.configurations.bundle) + } if (compileOnly == null) { classpath = configuration } else { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java index 03c18f54e67ef..4af104093a5cb 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java @@ -35,6 +35,7 @@ import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.Collections; import java.util.HashSet; import java.util.Set; @@ -105,7 +106,7 @@ public void doExport() { if (is == null) { throw new GradleException("Can't export `" + resourcePath + "` from build-tools: not found"); } - Files.copy(is, destination); + Files.copy(is, destination, StandardCopyOption.REPLACE_EXISTING); } catch (IOException e) { throw new GradleException("Can't write resource `" + resourcePath + "` to " + destination, e); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java new file mode 100644 index 0000000000000..e33f167096414 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.gradle.precommit; + +import de.thetaphi.forbiddenapis.cli.CliMain; +import org.gradle.api.Action; +import org.gradle.api.DefaultTask; +import org.gradle.api.JavaVersion; +import org.gradle.api.file.FileCollection; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.SkipWhenEmpty; +import org.gradle.api.tasks.TaskAction; +import org.gradle.process.JavaExecSpec; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; + +public class ForbiddenApisCliTask extends DefaultTask { + + private FileCollection signaturesFiles; + private List signatures = new ArrayList<>(); + private Set bundledSignatures = new LinkedHashSet<>(); + private Set suppressAnnotations = new LinkedHashSet<>(); + private JavaVersion targetCompatibility; + private FileCollection classesDirs; + private Action execAction; + + public JavaVersion getTargetCompatibility() { + return targetCompatibility; + } + + public void setTargetCompatibility(JavaVersion targetCompatibility) { + this.targetCompatibility = targetCompatibility; + } + + public Action getExecAction() { + return execAction; + } + + public void setExecAction(Action execAction) { + this.execAction = execAction; + } + + @OutputFile + public File getMarkerFile() { + return new File( + new File(getProject().getBuildDir(), "precommit"), + getName() + ); + } + + @InputFiles + @SkipWhenEmpty + public FileCollection getClassesDirs() { + return classesDirs.filter(File::exists); + } + + public void setClassesDirs(FileCollection classesDirs) { + this.classesDirs = classesDirs; + } + + @InputFiles + public FileCollection getSignaturesFiles() { + return signaturesFiles; + } + + public void setSignaturesFiles(FileCollection signaturesFiles) { + this.signaturesFiles = signaturesFiles; + } + + @Input + public List getSignatures() { + return signatures; + } + + public void setSignatures(List signatures) { + this.signatures = signatures; + } + + @Input + public Set getBundledSignatures() { + return bundledSignatures; + } + + public void setBundledSignatures(Set bundledSignatures) { + this.bundledSignatures = bundledSignatures; + } + + @Input + public Set getSuppressAnnotations() { + return suppressAnnotations; + } + + public void setSuppressAnnotations(Set suppressAnnotations) { + this.suppressAnnotations = suppressAnnotations; + } + + @TaskAction + public void runForbiddenApisAndWriteMarker() throws IOException { + getProject().javaexec((JavaExecSpec spec) -> { + execAction.execute(spec); + spec.setMain(CliMain.class.getName()); + // build the command line + getSignaturesFiles().forEach(file -> spec.args("-f", file.getAbsolutePath())); + getSuppressAnnotations().forEach(annotation -> spec.args("--suppressannotation", annotation)); + getBundledSignatures().forEach(bundled -> { + // there's no option for target compatibility so we have to interpret it + final String prefix; + if (bundled.equals("jdk-system-out") || + bundled.equals("jdk-reflection") || + bundled.equals("jdk-non-portable")) { + prefix = ""; + } else { + prefix = "-" + ( + getTargetCompatibility().compareTo(JavaVersion.VERSION_1_9) >= 0 ? + getTargetCompatibility().getMajorVersion() : + "1." 
+ getTargetCompatibility().getMajorVersion()) + ; + } + spec.args("-b", bundled + prefix); + } + ); + getClassesDirs().forEach(dir -> + spec.args("-d", dir) + ); + }); + Files.write(getMarkerFile().toPath(), Collections.emptyList()); + } + +} diff --git a/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy b/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy index ad36c84078398..f6b9cb5fc95bf 100644 --- a/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy +++ b/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy @@ -26,7 +26,7 @@ class VersionCollectionTests extends GradleUnitTestCase { assertEquals(vc.nextMinorSnapshot, Version.fromString("6.3.0-SNAPSHOT")) assertEquals(vc.stagedMinorSnapshot, Version.fromString("6.2.0-SNAPSHOT")) assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.1.1-SNAPSHOT")) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) + assertNull(vc.maintenanceBugfixSnapshot) vc.indexCompatible.containsAll(vc.versions) @@ -65,7 +65,7 @@ class VersionCollectionTests extends GradleUnitTestCase { assertEquals(vc.nextMinorSnapshot, Version.fromString("6.3.0-SNAPSHOT")) assertEquals(vc.stagedMinorSnapshot, null) assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.2.1-SNAPSHOT")) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) + assertNull(vc.maintenanceBugfixSnapshot) vc.indexCompatible.containsAll(vc.versions) diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 6f5eab6e1db1e..9acfc630f94f5 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -16,8 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks import org.elasticsearch.gradle.test.RestIntegTestTask import org.gradle.api.internal.provider.Providers @@ -47,13 +45,13 @@ dependencies { * Everything in the "shadow" configuration is *not* copied into the * shadowJar. */ - shadow "org.elasticsearch:elasticsearch:${version}" - shadow "org.elasticsearch.client:elasticsearch-rest-client:${version}" - shadow "org.elasticsearch.plugin:parent-join-client:${version}" - shadow "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}" - shadow "org.elasticsearch.plugin:rank-eval-client:${version}" - shadow "org.elasticsearch.plugin:lang-mustache-client:${version}" - compile project(':x-pack:protocol') + compile "org.elasticsearch:elasticsearch:${version}" + compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" + compile "org.elasticsearch.plugin:parent-join-client:${version}" + compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}" + compile "org.elasticsearch.plugin:rank-eval-client:${version}" + compile "org.elasticsearch.plugin:lang-mustache-client:${version}" + bundle project(':x-pack:protocol') testCompile "org.elasticsearch.client:test:${version}" testCompile "org.elasticsearch.test:framework:${version}" @@ -75,8 +73,8 @@ dependencyLicenses { forbiddenApisMain { // core does not depend on the httpclient for compile so we add the signatures here. 
We don't add them for test as they are already // specified - signaturesURLs += [PrecommitTasks.getResource('/forbidden/http-signatures.txt')] - signaturesURLs += [file('src/main/resources/forbidden/rest-high-level-signatures.txt').toURI().toURL()] + addSignatureFiles 'http-signatures' + signaturesFiles += files('src/main/resources/forbidden/rest-high-level-signatures.txt') } integTestCluster { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java new file mode 100644 index 0000000000000..293105f5abeb8 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; + +import java.io.IOException; + +import static java.util.Collections.emptySet; + + +public class GraphClient { + private final RestHighLevelClient restHighLevelClient; + + GraphClient(RestHighLevelClient restHighLevelClient) { + this.restHighLevelClient = restHighLevelClient; + } + + /** + * Executes an exploration request using the Graph API. + * + * See Graph API + * on elastic.co. + */ + public final GraphExploreResponse explore(GraphExploreRequest graphExploreRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(graphExploreRequest, RequestConverters::xPackGraphExplore, + options, GraphExploreResponse::fromXContext, emptySet()); + } + + /** + * Asynchronously executes an exploration request using the Graph API. + * + * See Graph API + * on elastic.co. 
+ */ + public final void exploreAsync(GraphExploreRequest graphExploreRequest, + RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(graphExploreRequest, RequestConverters::xPackGraphExplore, + options, GraphExploreResponse::fromXContext, listener, emptySet()); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index e26a4c629a0b0..6c1cc20570102 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -20,10 +20,15 @@ package org.elasticsearch.client; import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.RequestConverters.EndpointBuilder; +import org.elasticsearch.common.Strings; +import org.elasticsearch.protocol.xpack.ml.CloseJobRequest; import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; +import org.elasticsearch.protocol.xpack.ml.GetJobRequest; +import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest; import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.ml.PutJobRequest; @@ -48,6 +53,23 @@ static Request putJob(PutJobRequest putJobRequest) throws IOException { return request; } + static Request getJob(GetJobRequest getJobRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(Strings.collectionToCommaDelimitedString(getJobRequest.getJobIds())) + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + RequestConverters.Params params = new RequestConverters.Params(request); + if (getJobRequest.isAllowNoJobs() != null) { + params.putParam("allow_no_jobs", Boolean.toString(getJobRequest.isAllowNoJobs())); + } + + return request; + } + static Request openJob(OpenJobRequest openJobRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") @@ -57,7 +79,20 @@ static Request openJob(OpenJobRequest openJobRequest) throws IOException { .addPathPartAsIs("_open") .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); - request.setJsonEntity(openJobRequest.toString()); + request.setEntity(createEntity(openJobRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request closeJob(CloseJobRequest closeJobRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(Strings.collectionToCommaDelimitedString(closeJobRequest.getJobIds())) + .addPathPartAsIs("_close") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(closeJobRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -75,4 +110,18 @@ static Request deleteJob(DeleteJobRequest deleteJobRequest) { return request; } + + static Request getBuckets(GetBucketsRequest getBucketsRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(getBucketsRequest.getJobId()) + .addPathPartAsIs("results") 
+ .addPathPartAsIs("buckets") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(getBucketsRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index 32b6cd6cf2c67..c4dcc1eaffc5a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -19,8 +19,14 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.protocol.xpack.ml.CloseJobRequest; +import org.elasticsearch.protocol.xpack.ml.CloseJobResponse; import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse; +import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest; +import org.elasticsearch.protocol.xpack.ml.GetBucketsResponse; +import org.elasticsearch.protocol.xpack.ml.GetJobRequest; +import org.elasticsearch.protocol.xpack.ml.GetJobResponse; import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; import org.elasticsearch.protocol.xpack.ml.PutJobRequest; @@ -50,7 +56,7 @@ public final class MachineLearningClient { * For additional info * see ML PUT job documentation * - * @param request the PutJobRequest containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings + * @param request The PutJobRequest containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return PutJobResponse with enclosed {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} object * @throws IOException when there is a serialization issue sending the request or receiving the response @@ -69,7 +75,7 @@ public PutJobResponse putJob(PutJobRequest request, RequestOptions options) thro * For additional info * see ML PUT job documentation * - * @param request the request containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings + * @param request The request containing the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} settings * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion */ @@ -82,13 +88,54 @@ public void putJobAsync(PutJobRequest request, RequestOptions options, ActionLis Collections.emptySet()); } + /** + * Gets one or more Machine Learning job configuration info. + * + *

+ * For additional info + * see + *

+ * @param request {@link GetJobRequest} Request containing a list of jobId(s) and additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return {@link GetJobResponse} response object containing + * the {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} objects and the number of jobs found + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public GetJobResponse getJob(GetJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getJob, + options, + GetJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets one or more Machine Learning job configuration info, asynchronously. + * + *

+ * For additional info + * see + *

+ * @param request {@link GetJobRequest} Request containing a list of jobId(s) and additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified with {@link GetJobResponse} upon request completion + */ + public void getJobAsync(GetJobRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getJob, + options, + GetJobResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Deletes the given Machine Learning Job *

* For additional info * see ML Delete Job documentation *

- * @param request the request to delete the job + * @param request The request to delete the job * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return action acknowledgement * @throws IOException when there is a serialization issue sending the request or receiving the response @@ -107,7 +154,7 @@ public DeleteJobResponse deleteJob(DeleteJobRequest request, RequestOptions opti * For additional info * see ML Delete Job documentation *

- * @param request the request to delete the job + * @param request The request to delete the job * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion */ @@ -131,7 +178,7 @@ public void deleteJobAsync(DeleteJobRequest request, RequestOptions options, Act * For additional info * see *

- * @param request request containing job_id and additional optional options + * @param request Request containing job_id and additional optional options * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return response containing if the job was successfully opened or not. * @throws IOException when there is a serialization issue sending the request or receiving the response @@ -154,7 +201,7 @@ public OpenJobResponse openJob(OpenJobRequest request, RequestOptions options) t * For additional info * see *

- * @param request request containing job_id and additional optional options + * @param request Request containing job_id and additional optional options * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion */ @@ -166,4 +213,76 @@ public void openJobAsync(OpenJobRequest request, RequestOptions options, ActionL listener, Collections.emptySet()); } + + /** + * Closes one or more Machine Learning Jobs. A job can be opened and closed multiple times throughout its lifecycle. + * + * A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. + * + * @param request Request containing job_ids and additional options. See {@link CloseJobRequest} + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return response containing if the job was successfully closed or not. + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public CloseJobResponse closeJob(CloseJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::closeJob, + options, + CloseJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Closes one or more Machine Learning Jobs asynchronously, notifies listener on completion + * + * A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. + * + * @param request Request containing job_ids and additional options. See {@link CloseJobRequest} + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void closeJobAsync(CloseJobRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::closeJob, + options, + CloseJobResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Gets the buckets for a Machine Learning Job. + *

+ * For additional info + * see ML GET buckets documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public GetBucketsResponse getBuckets(GetBucketsRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getBuckets, + options, + GetBucketsResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets the buckets for a Machine Learning Job, notifies listener once the requested buckets are retrieved. + *

+ * For additional info + * see ML GET buckets documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void getBucketsAsync(GetBucketsRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getBuckets, + options, + GetBucketsResponse::fromXContent, + listener, + Collections.emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 0e5fce5b2272c..9dd316a0fb023 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -114,6 +114,7 @@ import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.mustache.MultiSearchTemplateRequest; import org.elasticsearch.script.mustache.SearchTemplateRequest; @@ -1124,6 +1125,13 @@ static Request xPackInfo(XPackInfoRequest infoRequest) { return request; } + static Request xPackGraphExplore(GraphExploreRequest exploreRequest) throws IOException { + String endpoint = endpoint(exploreRequest.indices(), exploreRequest.types(), "_xpack/graph/_explore"); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(exploreRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request xPackWatcherPutWatch(PutWatchRequest putWatchRequest) { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index e705ca12806ba..7376f74839ce1 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -209,6 +209,7 @@ public class RestHighLevelClient implements Closeable { private final TasksClient tasksClient = new TasksClient(this); private final XPackClient xPackClient = new XPackClient(this); private final WatcherClient watcherClient = new WatcherClient(this); + private final GraphClient graphClient = new GraphClient(this); private final LicenseClient licenseClient = new LicenseClient(this); private final MigrationClient migrationClient = new MigrationClient(this); private final MachineLearningClient machineLearningClient = new MachineLearningClient(this); @@ -324,6 +325,16 @@ public final XPackClient xpack() { * Watcher APIs on elastic.co for more information. */ public WatcherClient watcher() { return watcherClient; } + + /** + * Provides methods for accessing the Elastic Licensed Graph explore API that + * is shipped with the default distribution of Elasticsearch. All of + * these APIs will 404 if run against the OSS distribution of Elasticsearch. + *

+ * See the + * Graph API on elastic.co for more information. + */ + public GraphClient graph() { return graphClient; } /** * Provides methods for accessing the Elastic Licensed Licensing APIs that @@ -949,6 +960,11 @@ public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesReque FieldCapabilitiesResponse::fromXContent, listener, emptySet()); } + /** + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation + * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + */ + @Deprecated protected final Resp performRequestAndParseEntity(Req request, CheckedFunction requestConverter, RequestOptions options, @@ -958,15 +974,58 @@ protected final Resp performRequestAndParseEnt response -> parseEntity(response.getEntity(), entityParser), ignores); } + /** + * Defines a helper method for performing a request and then parsing the returned entity using the provided entityParser. + */ + protected final Resp performRequestAndParseEntity(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + Set ignores) throws IOException { + return performRequest(request, requestConverter, options, + response -> parseEntity(response.getEntity(), entityParser), ignores); + } + + /** + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation + * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + */ + @Deprecated protected final Resp performRequest(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - Set ignores) throws IOException { + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + Set ignores) throws IOException { ActionRequestValidationException validationException = request.validate(); - if (validationException != null) { + if (validationException != null && validationException.validationErrors().isEmpty() == false) { throw validationException; } + return internalPerformRequest(request, requestConverter, options, responseConverter, ignores); + } + + /** + * Defines a helper method for performing a request. + */ + protected final Resp performRequest(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + Set ignores) throws IOException { + ValidationException validationException = request.validate(); + if (validationException != null && validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + return internalPerformRequest(request, requestConverter, options, responseConverter, ignores); + } + + /** + * Provides common functionality for performing a request. + */ + private Resp internalPerformRequest(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + Set ignores) throws IOException { Request req = requestConverter.apply(request); req.setOptions(options); Response response; @@ -994,25 +1053,75 @@ protected final Resp performRequest(Req reques } } + /** + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. 
The Validation + * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + */ + @Deprecated protected final void performRequestAsyncAndParseEntity(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction entityParser, - ActionListener listener, Set ignores) { + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + ActionListener listener, Set ignores) { performRequestAsync(request, requestConverter, options, response -> parseEntity(response.getEntity(), entityParser), listener, ignores); } + /** + * Defines a helper method for asynchronously performing a request. + */ + protected final void performRequestAsyncAndParseEntity(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + ActionListener listener, Set ignores) { + performRequestAsync(request, requestConverter, options, + response -> parseEntity(response.getEntity(), entityParser), listener, ignores); + } + + + /** + * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation + * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + */ + @Deprecated protected final void performRequestAsync(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - ActionListener listener, Set ignores) { + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + ActionListener listener, Set ignores) { ActionRequestValidationException validationException = request.validate(); - if (validationException != null) { + if (validationException != null && validationException.validationErrors().isEmpty() == false) { listener.onFailure(validationException); return; } + internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); + } + + /** + * Defines a helper method for asynchronously performing a request. + */ + protected final void performRequestAsync(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + ActionListener listener, Set ignores) { + ValidationException validationException = request.validate(); + if (validationException != null && validationException.validationErrors().isEmpty() == false) { + listener.onFailure(validationException); + return; + } + internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); + } + + /** + * Provides common functionality for asynchronously performing a request. 
+
 final <Resp> ResponseListener wrapResponseListener(CheckedFunction<Response, Resp, IOException> responseConverter,
 ActionListener<Resp> actionListener, Set<Integer> ignores) {
 return new ResponseListener() {
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java
new file mode 100644 index 0000000000000..2efff4d3663b8 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java
@@ -0,0 +1,41 @@
+/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */
+package org.elasticsearch.client;
+
+/**
+ * Defines a validation layer for Requests.
+ */
+public interface Validatable {
+ ValidationException EMPTY_VALIDATION = new ValidationException() {
+ @Override
+ public void addValidationError(String error) {
+ throw new UnsupportedOperationException("Validation messages should not be added to the empty validation");
+ }
+ };
+
+ /**
+ * Perform validation. This method does not have to be overridden in the event that no validation needs to be done.
+ *
+ * @return potentially null, in the event of older actions, an empty {@link ValidationException} in newer actions, or finally a
+ * {@link ValidationException} that contains a list of all failed validation.
+ */
+ default ValidationException validate() {
+ return EMPTY_VALIDATION;
+ }
+}
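To make the contract concrete, a request opting into this layer might look like the sketch below (a hypothetical class, not part of this diff); it pairs with the ValidationException container added next.

    package org.elasticsearch.client;

    // Hypothetical request illustrating the Validatable contract; not part of this change.
    public class ExampleStopRequest implements Validatable {
        private final String taskId;

        public ExampleStopRequest(String taskId) {
            this.taskId = taskId;
        }

        @Override
        public ValidationException validate() {
            ValidationException exception = new ValidationException();
            if (taskId == null || taskId.isEmpty()) {
                exception.addValidationError("task_id must not be null or empty");
            }
            // Returning an exception with no errors is fine: the perform* helpers only
            // throw or call onFailure when validationErrors() is non-empty.
            return exception;
        }
    }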
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java
new file mode 100644 index 0000000000000..6b5d738d67565 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java
@@ -0,0 +1,55 @@
+/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */
+package org.elasticsearch.client;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Encapsulates an accumulation of validation errors
+ */
+public class ValidationException extends IllegalArgumentException {
+ private final List<String> validationErrors = new ArrayList<>();
+
+ /**
+ * Add a new validation error to the accumulating validation errors
+ * @param error the error to add
+ */
+ public void addValidationError(String error) {
+ validationErrors.add(error);
+ }
+
+ /**
+ * Returns the validation errors accumulated
+ */
+ public final List<String> validationErrors() {
+ return validationErrors;
+ }
+
+ @Override
+ public final String getMessage() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("Validation Failed: ");
+ int index = 0;
+ for (String error : validationErrors) {
+ sb.append(++index).append(": ").append(error).append(";");
+ }
+ return sb.toString();
+ }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/GraphIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/GraphIT.java
new file mode 100644 index 0000000000000..4376b47d737b4 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/GraphIT.java
@@ -0,0 +1,139 @@
+/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.protocol.xpack.graph.Vertex; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.hamcrest.Matchers; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +public class GraphIT extends ESRestHighLevelClientTestCase { + + @Before + public void indexDocuments() throws IOException { + // Create chain of doc IDs across indices 1->2->3 + Request doc1 = new Request(HttpPut.METHOD_NAME, "/index1/type/1"); + doc1.setJsonEntity("{ \"num\":[1], \"const\":\"start\"}"); + client().performRequest(doc1); + + Request doc2 = new Request(HttpPut.METHOD_NAME, "/index2/type/1"); + doc2.setJsonEntity("{\"num\":[1,2], \"const\":\"foo\"}"); + client().performRequest(doc2); + + Request doc3 = new Request(HttpPut.METHOD_NAME, "/index2/type/2"); + doc3.setJsonEntity("{\"num\":[2,3], \"const\":\"foo\"}"); + client().performRequest(doc3); + + Request doc4 = new Request(HttpPut.METHOD_NAME, "/index_no_field_data/type/2"); + doc4.setJsonEntity("{\"num\":\"string\", \"const\":\"foo\"}"); + client().performRequest(doc4); + + Request doc5 = new Request(HttpPut.METHOD_NAME, "/index_no_field_data/type/2"); + doc5.setJsonEntity("{\"num\":[2,4], \"const\":\"foo\"}"); + client().performRequest(doc5); + + + client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh")); + } + + public void testCleanExplore() throws Exception { + GraphExploreRequest graphExploreRequest = new GraphExploreRequest(); + graphExploreRequest.indices("index1", "index2"); + graphExploreRequest.useSignificance(false); + int numHops = 3; + for (int i = 0; i < numHops; i++) { + QueryBuilder guidingQuery = null; + if (i == 0) { + guidingQuery = new TermQueryBuilder("const.keyword", "start"); + } else if (randomBoolean()){ + guidingQuery = new TermQueryBuilder("const.keyword", "foo"); + } + Hop hop = graphExploreRequest.createNextHop(guidingQuery); + VertexRequest vr = hop.addVertexRequest("num"); + vr.minDocCount(1); + } + Map expectedTermsAndDepths = new HashMap<>(); + expectedTermsAndDepths.put("1", 0); + expectedTermsAndDepths.put("2", 1); + expectedTermsAndDepths.put("3", 2); + + GraphExploreResponse exploreResponse = highLevelClient().graph().explore(graphExploreRequest, RequestOptions.DEFAULT); + Map actualTermsAndDepths = new HashMap<>(); + Collection v = exploreResponse.getVertices(); + for (Vertex vertex : v) { + actualTermsAndDepths.put(vertex.getTerm(), vertex.getHopDepth()); + } + assertEquals(expectedTermsAndDepths, actualTermsAndDepths); + assertThat(exploreResponse.isTimedOut(), Matchers.is(false)); + ShardOperationFailedException[] failures = exploreResponse.getShardFailures(); + assertThat(failures.length, Matchers.equalTo(0)); + + } + + public void testBadExplore() throws Exception { + //Explore indices where lack of fielddata=true on one index leads to partial failures + GraphExploreRequest graphExploreRequest = new GraphExploreRequest(); + graphExploreRequest.indices("index1", "index2", 
"index_no_field_data"); + graphExploreRequest.useSignificance(false); + int numHops = 3; + for (int i = 0; i < numHops; i++) { + QueryBuilder guidingQuery = null; + if (i == 0) { + guidingQuery = new TermQueryBuilder("const.keyword", "start"); + } else if (randomBoolean()){ + guidingQuery = new TermQueryBuilder("const.keyword", "foo"); + } + Hop hop = graphExploreRequest.createNextHop(guidingQuery); + VertexRequest vr = hop.addVertexRequest("num"); + vr.minDocCount(1); + } + Map expectedTermsAndDepths = new HashMap<>(); + expectedTermsAndDepths.put("1", 0); + expectedTermsAndDepths.put("2", 1); + expectedTermsAndDepths.put("3", 2); + + GraphExploreResponse exploreResponse = highLevelClient().graph().explore(graphExploreRequest, RequestOptions.DEFAULT); + Map actualTermsAndDepths = new HashMap<>(); + Collection v = exploreResponse.getVertices(); + for (Vertex vertex : v) { + actualTermsAndDepths.put(vertex.getTerm(), vertex.getHopDepth()); + } + assertEquals(expectedTermsAndDepths, actualTermsAndDepths); + assertThat(exploreResponse.isTimedOut(), Matchers.is(false)); + ShardOperationFailedException[] failures = exploreResponse.getShardFailures(); + assertThat(failures.length, Matchers.equalTo(1)); + assertTrue(failures[0].reason().contains("Fielddata is disabled")); + + } + + +} \ No newline at end of file diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index 43a41960e003c..9065cda9cd6fc 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -20,16 +20,22 @@ package org.elasticsearch.client; import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.protocol.xpack.ml.CloseJobRequest; import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; +import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest; +import org.elasticsearch.protocol.xpack.ml.GetJobRequest; import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.ml.PutJobRequest; import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig; import org.elasticsearch.protocol.xpack.ml.job.config.Detector; import org.elasticsearch.protocol.xpack.ml.job.config.Job; +import org.elasticsearch.protocol.xpack.ml.job.util.PageParams; import org.elasticsearch.test.ESTestCase; import java.io.ByteArrayOutputStream; @@ -46,6 +52,7 @@ public void testPutJob() throws IOException { Request request = MLRequestConverters.putJob(putJobRequest); + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); assertThat(request.getEndpoint(), equalTo("/_xpack/ml/anomaly_detectors/foo")); try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { Job parsedJob = Job.PARSER.apply(parser, null).build(); @@ -53,6 +60,23 @@ public void testPutJob() throws IOException { } } + public void testGetJob() { + GetJobRequest getJobRequest = new GetJobRequest(); + + Request request = MLRequestConverters.getJob(getJobRequest); + + assertEquals(HttpGet.METHOD_NAME, 
request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors", request.getEndpoint()); + assertFalse(request.getParameters().containsKey("allow_no_jobs")); + + getJobRequest = new GetJobRequest("job1", "jobs*"); + getJobRequest.setAllowNoJobs(true); + request = MLRequestConverters.getJob(getJobRequest); + + assertEquals("/_xpack/ml/anomaly_detectors/job1,jobs*", request.getEndpoint()); + assertEquals(Boolean.toString(true), request.getParameters().get("allow_no_jobs")); + } + public void testOpenJob() throws Exception { String jobId = "some-job-id"; OpenJobRequest openJobRequest = new OpenJobRequest(jobId); @@ -61,9 +85,27 @@ public void testOpenJob() throws Exception { Request request = MLRequestConverters.openJob(openJobRequest); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_open", request.getEndpoint()); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - request.getEntity().writeTo(bos); - assertEquals(bos.toString("UTF-8"), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}"); + assertEquals(requestEntityToString(request), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}"); + } + + public void testCloseJob() throws Exception { + String jobId = "somejobid"; + CloseJobRequest closeJobRequest = new CloseJobRequest(jobId); + + Request request = MLRequestConverters.closeJob(closeJobRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_close", request.getEndpoint()); + assertEquals("{\"job_id\":\"somejobid\"}", requestEntityToString(request)); + + closeJobRequest = new CloseJobRequest(jobId, "otherjobs*"); + closeJobRequest.setForce(true); + closeJobRequest.setAllowNoJobs(false); + closeJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); + request = MLRequestConverters.closeJob(closeJobRequest); + + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + ",otherjobs*/_close", request.getEndpoint()); + assertEquals("{\"job_id\":\"somejobid,otherjobs*\",\"timeout\":\"10m\",\"force\":true,\"allow_no_jobs\":false}", + requestEntityToString(request)); } public void testDeleteJob() { @@ -80,6 +122,23 @@ public void testDeleteJob() { assertEquals(Boolean.toString(true), request.getParameters().get("force")); } + public void testGetBuckets() throws IOException { + String jobId = randomAlphaOfLength(10); + GetBucketsRequest getBucketsRequest = new GetBucketsRequest(jobId); + getBucketsRequest.setPageParams(new PageParams(100, 300)); + getBucketsRequest.setAnomalyScore(75.0); + getBucketsRequest.setSort("anomaly_score"); + getBucketsRequest.setDescending(true); + + Request request = MLRequestConverters.getBuckets(getBucketsRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/results/buckets", request.getEndpoint()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + GetBucketsRequest parsedRequest = GetBucketsRequest.PARSER.apply(parser, null); + assertThat(parsedRequest, equalTo(getBucketsRequest)); + } + } + private static Job createValidJob(String jobId) { AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList( Detector.builder().setFunction("count").build())); @@ -87,4 +146,10 @@ private static Job createValidJob(String jobId) { jobBuilder.setAnalysisConfig(analysisConfig); return jobBuilder.build(); } -} \ No newline at end of file + + private static String 
requestEntityToString(Request request) throws Exception { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + request.getEntity().writeTo(bos); + return bos.toString("UTF-8"); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java new file mode 100644 index 0000000000000..a4f83c347ad13 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java @@ -0,0 +1,217 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client; + +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest; +import org.elasticsearch.protocol.xpack.ml.GetBucketsResponse; +import org.elasticsearch.protocol.xpack.ml.PutJobRequest; +import org.elasticsearch.protocol.xpack.ml.job.config.Job; +import org.elasticsearch.protocol.xpack.ml.job.results.Bucket; +import org.elasticsearch.protocol.xpack.ml.job.util.PageParams; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase { + + private static final String RESULTS_INDEX = ".ml-anomalies-shared"; + private static final String DOC = "doc"; + + private static final String JOB_ID = "get-results-it-job"; + + // 2018-08-01T00:00:00Z + private static final long START_TIME_EPOCH_MS = 1533081600000L; + + private BucketStats bucketStats = new BucketStats(); + + @Before + public void createJobAndIndexResults() throws IOException { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + Job job = MachineLearningIT.buildJob(JOB_ID); + machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + long time = START_TIME_EPOCH_MS; + long endTime = time + 3600000L * 24 * 10; // 10 days of hourly buckets + while (time < endTime) { + addBucketIndexRequest(time, false, bulkRequest); + addRecordIndexRequests(time, false, bulkRequest); + time += 3600000L; + } + + // Also index an interim bucket + addBucketIndexRequest(time, true, bulkRequest); + addRecordIndexRequests(time, true, bulkRequest); + + highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT); + } + + private void 
addBucketIndexRequest(long timestamp, boolean isInterim, BulkRequest bulkRequest) { + IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC); + double bucketScore = randomDoubleBetween(0.0, 100.0, true); + bucketStats.report(bucketScore); + indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"bucket\", \"timestamp\": " + timestamp + "," + + "\"bucket_span\": 3600,\"is_interim\": " + isInterim + ", \"anomaly_score\": " + bucketScore + + ", \"bucket_influencers\":[{\"job_id\": \"" + JOB_ID + "\", \"result_type\":\"bucket_influencer\", " + + "\"influencer_field_name\": \"bucket_time\", \"timestamp\": " + timestamp + ", \"bucket_span\": 3600, " + + "\"is_interim\": " + isInterim + "}]}", XContentType.JSON); + bulkRequest.add(indexRequest); + } + + private void addRecordIndexRequests(long timestamp, boolean isInterim, BulkRequest bulkRequest) { + if (randomBoolean()) { + return; + } + int recordCount = randomIntBetween(1, 3); + for (int i = 0; i < recordCount; ++i) { + IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC); + double recordScore = randomDoubleBetween(0.0, 100.0, true); + double p = randomDoubleBetween(0.0, 0.05, false); + indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"record\", \"timestamp\": " + timestamp + "," + + "\"bucket_span\": 3600,\"is_interim\": " + isInterim + ", \"record_score\": " + recordScore + ", \"probability\": " + + p + "}", XContentType.JSON); + bulkRequest.add(indexRequest); + } + } + + @After + public void deleteJob() throws IOException { + new MlRestTestStateCleaner(logger, client()).clearMlMetadata(); + } + + public void testGetBuckets() throws IOException { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(241L)); + assertThat(response.buckets().size(), equalTo(100)); + assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setTimestamp("1533081600000"); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(1L)); + assertThat(response.buckets().size(), equalTo(1)); + assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setAnomalyScore(75.0); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(bucketStats.criticalCount)); + assertThat(response.buckets().size(), equalTo((int) Math.min(100, bucketStats.criticalCount))); + assertThat(response.buckets().stream().anyMatch(b -> b.getAnomalyScore() < 75.0), is(false)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setExcludeInterim(true); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(240L)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setStart("1533081600000"); + request.setEnd("1533092400000"); + + GetBucketsResponse response = 
execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.count(), equalTo(3L)); + assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS)); + assertThat(response.buckets().get(1).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 3600000L)); + assertThat(response.buckets().get(2).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 2 * + 3600000L)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setPageParams(new PageParams(3, 3)); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.buckets().size(), equalTo(3)); + assertThat(response.buckets().get(0).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 3 * 3600000L)); + assertThat(response.buckets().get(1).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 4 * 3600000L)); + assertThat(response.buckets().get(2).getTimestamp().getTime(), equalTo(START_TIME_EPOCH_MS + 5 * 3600000L)); + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + request.setSort("anomaly_score"); + request.setDescending(true); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + double previousScore = 100.0; + for (Bucket bucket : response.buckets()) { + assertThat(bucket.getAnomalyScore(), lessThanOrEqualTo(previousScore)); + previousScore = bucket.getAnomalyScore(); + } + } + { + GetBucketsRequest request = new GetBucketsRequest(JOB_ID); + // Make sure we get all buckets + request.setPageParams(new PageParams(0, 10000)); + request.setExpand(true); + + GetBucketsResponse response = execute(request, machineLearningClient::getBuckets, machineLearningClient::getBucketsAsync); + + assertThat(response.buckets().stream().anyMatch(b -> b.getRecords().size() > 0), is(true)); + } + } + + private static class BucketStats { + // score < 50.0 + private long minorCount; + + // score < 75.0 + private long majorCount; + + // score > 75.0 + private long criticalCount; + + private void report(double anomalyScore) { + if (anomalyScore < 50.0) { + minorCount++; + } else if (anomalyScore < 75.0) { + majorCount++; + } else { + criticalCount++; + } + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 95a29e99e5266..cec5dd7ccf8ff 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -19,10 +19,13 @@ package org.elasticsearch.client; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.protocol.xpack.ml.CloseJobRequest; +import org.elasticsearch.protocol.xpack.ml.CloseJobResponse; import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse; +import org.elasticsearch.protocol.xpack.ml.GetJobRequest; +import org.elasticsearch.protocol.xpack.ml.GetJobResponse; import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; import org.elasticsearch.protocol.xpack.ml.PutJobRequest; @@ -31,15 +34,25 @@ import 
org.elasticsearch.protocol.xpack.ml.job.config.DataDescription; import org.elasticsearch.protocol.xpack.ml.job.config.Detector; import org.elasticsearch.protocol.xpack.ml.job.config.Job; +import org.junit.After; +import java.io.IOException; import java.util.Arrays; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; -@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32993") public class MachineLearningIT extends ESRestHighLevelClientTestCase { + @After + public void cleanUp() throws IOException { + new MlRestTestStateCleaner(logger, client()).clearMlMetadata(); + } + public void testPutJob() throws Exception { String jobId = randomValidJobId(); Job job = buildJob(jobId); @@ -52,6 +65,41 @@ public void testPutJob() throws Exception { assertThat(createdJob.getJobType(), is(Job.ANOMALY_DETECTOR_JOB_TYPE)); } + public void testGetJob() throws Exception { + String jobId1 = randomValidJobId(); + String jobId2 = randomValidJobId(); + + Job job1 = buildJob(jobId1); + Job job2 = buildJob(jobId2); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job1), RequestOptions.DEFAULT); + machineLearningClient.putJob(new PutJobRequest(job2), RequestOptions.DEFAULT); + + GetJobRequest request = new GetJobRequest(jobId1, jobId2); + + // Test getting specific jobs + GetJobResponse response = execute(request, machineLearningClient::getJob, machineLearningClient::getJobAsync); + + assertEquals(2, response.count()); + assertThat(response.jobs(), hasSize(2)); + assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), containsInAnyOrder(jobId1, jobId2)); + + // Test getting all jobs explicitly + request = GetJobRequest.getAllJobsRequest(); + response = execute(request, machineLearningClient::getJob, machineLearningClient::getJobAsync); + + assertTrue(response.count() >= 2L); + assertTrue(response.jobs().size() >= 2L); + assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + + // Test getting all jobs implicitly + response = execute(new GetJobRequest(), machineLearningClient::getJob, machineLearningClient::getJobAsync); + + assertTrue(response.count() >= 2L); + assertTrue(response.jobs().size() >= 2L); + assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + } + public void testDeleteJob() throws Exception { String jobId = randomValidJobId(); Job job = buildJob(jobId); @@ -77,6 +125,19 @@ public void testOpenJob() throws Exception { assertTrue(response.isOpened()); } + public void testCloseJob() throws Exception { + String jobId = randomValidJobId(); + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT); + + CloseJobResponse response = execute(new CloseJobRequest(jobId), + machineLearningClient::closeJob, + machineLearningClient::closeJobAsync); + assertTrue(response.isClosed()); + } + public static String randomValidJobId() { CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray()); return 
generator.ofCodePointsLength(random(), 10, 10);
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MlRestTestStateCleaner.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MlRestTestStateCleaner.java
new file mode 100644 index 0000000000000..7ad86576245ef --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MlRestTestStateCleaner.java
@@ -0,0 +1,109 @@
+/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */
+package org.elasticsearch.client;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.test.rest.ESRestTestCase;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This is temporarily duplicated from the server side.
+ * @TODO Replace with an implementation using the HLRC once
+ * the APIs for managing datafeeds are implemented.
+ */
+public class MlRestTestStateCleaner {
+
+ private final Logger logger;
+ private final RestClient adminClient;
+
+ public MlRestTestStateCleaner(Logger logger, RestClient adminClient) {
+ this.logger = logger;
+ this.adminClient = adminClient;
+ }
+
+ public void clearMlMetadata() throws IOException {
+ deleteAllDatafeeds();
+ deleteAllJobs();
+ // indices will be deleted by the ESRestTestCase class
+ }
+
+ @SuppressWarnings("unchecked")
+ private void deleteAllDatafeeds() throws IOException {
+ final Request datafeedsRequest = new Request("GET", "/_xpack/ml/datafeeds");
+ datafeedsRequest.addParameter("filter_path", "datafeeds");
+ final Response datafeedsResponse = adminClient.performRequest(datafeedsRequest);
+ final List<Map<String, Object>> datafeeds =
+ (List<Map<String, Object>>) XContentMapValues.extractValue("datafeeds", ESRestTestCase.entityAsMap(datafeedsResponse));
+ if (datafeeds == null) {
+ return;
+ }
+
+ try {
+ adminClient.performRequest(new Request("POST", "/_xpack/ml/datafeeds/_all/_stop"));
+ } catch (Exception e1) {
+ logger.warn("failed to stop all datafeeds. Forcing stop", e1);
+ try {
+ adminClient.performRequest(new Request("POST", "/_xpack/ml/datafeeds/_all/_stop?force=true"));
+ } catch (Exception e2) {
+ logger.warn("Force-closing all data feeds failed", e2);
+ }
+ throw new RuntimeException(
+ "Had to resort to force-stopping datafeeds, something went wrong?", e1);
+ }
+
+ for (Map<String, Object> datafeed : datafeeds) {
+ String datafeedId = (String) datafeed.get("datafeed_id");
+ adminClient.performRequest(new Request("DELETE", "/_xpack/ml/datafeeds/" + datafeedId));
+ }
+ }
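For reference, the tests elsewhere in this change wire the cleaner into JUnit teardown, as in this fragment taken from the MachineLearningIT changes in this same diff:

    @After
    public void cleanUp() throws IOException {
        new MlRestTestStateCleaner(logger, client()).clearMlMetadata();
    }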
Forcing stop", e1); + try { + adminClient.performRequest(new Request("POST", "/_xpack/ml/datafeeds/_all/_stop?force=true")); + } catch (Exception e2) { + logger.warn("Force-closing all data feeds failed", e2); + } + throw new RuntimeException( + "Had to resort to force-stopping datafeeds, something went wrong?", e1); + } + + for (Map datafeed : datafeeds) { + String datafeedId = (String) datafeed.get("datafeed_id"); + adminClient.performRequest(new Request("DELETE", "/_xpack/ml/datafeeds/" + datafeedId)); + } + } + + private void deleteAllJobs() throws IOException { + final Request jobsRequest = new Request("GET", "/_xpack/ml/anomaly_detectors"); + jobsRequest.addParameter("filter_path", "jobs"); + final Response response = adminClient.performRequest(jobsRequest); + @SuppressWarnings("unchecked") + final List> jobConfigs = + (List>) XContentMapValues.extractValue("jobs", ESRestTestCase.entityAsMap(response)); + if (jobConfigs == null) { + return; + } + + try { + adminClient.performRequest(new Request("POST", "/_xpack/ml/anomaly_detectors/_all/_close")); + } catch (Exception e1) { + logger.warn("failed to close all jobs. Forcing closed", e1); + try { + adminClient.performRequest(new Request("POST", "/_xpack/ml/anomaly_detectors/_all/_close?force=true")); + } catch (Exception e2) { + logger.warn("Force-closing all jobs failed", e2); + } + throw new RuntimeException("Had to resort to force-closing jobs, something went wrong?", + e1); + } + + for (Map jobConfig : jobConfigs) { + String jobId = (String) jobConfig.get("job_id"); + adminClient.performRequest(new Request("DELETE", "/_xpack/ml/anomaly_detectors/" + jobId)); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 47195f0bb2aba..ebabb8f95b594 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -118,6 +118,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.rankeval.PrecisionAtK; @@ -128,6 +129,8 @@ import org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.Hop; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.action.search.RestSearchAction; @@ -2598,6 +2601,35 @@ public void testXPackPutWatch() throws Exception { request.getEntity().writeTo(bos); assertThat(bos.toString("UTF-8"), is(body)); } + + public void testGraphExplore() throws Exception { + Map expectedParams = new HashMap<>(); + + GraphExploreRequest graphExploreRequest = new GraphExploreRequest(); + graphExploreRequest.sampleDiversityField("diversity"); + graphExploreRequest.indices("index1", "index2"); + graphExploreRequest.types("type1", "type2"); + int timeout = randomIntBetween(10000, 20000); + 
graphExploreRequest.timeout(TimeValue.timeValueMillis(timeout)); + graphExploreRequest.useSignificance(randomBoolean()); + int numHops = randomIntBetween(1, 5); + for (int i = 0; i < numHops; i++) { + int hopNumber = i + 1; + QueryBuilder guidingQuery = null; + if (randomBoolean()) { + guidingQuery = new TermQueryBuilder("field" + hopNumber, "value" + hopNumber); + } + Hop hop = graphExploreRequest.createNextHop(guidingQuery); + hop.addVertexRequest("field" + hopNumber); + hop.getVertexRequest(0).addInclude("value" + hopNumber, hopNumber); + } + Request request = RequestConverters.xPackGraphExplore(graphExploreRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/index1,index2/type1,type2/_xpack/graph/_explore", request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertThat(request.getEntity().getContentType().getValue(), is(XContentType.JSON.mediaTypeWithoutParameters())); + assertToXContentBody(graphExploreRequest, request.getEntity()); + } public void testXPackDeleteWatch() { DeleteWatchRequest deleteWatchRequest = new DeleteWatchRequest(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index b5d8dbb628eb9..1036b79a4a5d6 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -758,6 +758,7 @@ public void testApiNamingConventions() throws Exception { apiName.startsWith("license.") == false && apiName.startsWith("machine_learning.") == false && apiName.startsWith("watcher.") == false && + apiName.startsWith("graph.") == false && apiName.startsWith("migration.") == false) { apiNotFound.add(apiName); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 9c9c5425f0006..739a590ba5f64 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -256,7 +256,7 @@ public void testSearchWithTermsAgg() throws IOException { assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); Terms termsAgg = searchResponse.getAggregations().get("agg1"); assertEquals("agg1", termsAgg.getName()); assertEquals(2, termsAgg.getBuckets().size()); @@ -293,7 +293,7 @@ public void testSearchWithRangeAgg() throws IOException { assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(5, searchResponse.getHits().totalHits); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); Range rangeAgg = searchResponse.getAggregations().get("agg1"); assertEquals("agg1", rangeAgg.getName()); assertEquals(2, rangeAgg.getBuckets().size()); @@ -323,7 +323,7 @@ public void testSearchWithTermsAndRangeAgg() throws IOException { assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), 
searchResponse.getProfileResults()); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); Terms termsAgg = searchResponse.getAggregations().get("agg1"); assertEquals("agg1", termsAgg.getName()); assertEquals(2, termsAgg.getBuckets().size()); @@ -375,7 +375,7 @@ public void testSearchWithMatrixStats() throws IOException { assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(5, searchResponse.getHits().totalHits); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); assertEquals(1, searchResponse.getAggregations().asList().size()); MatrixStats matrixStats = searchResponse.getAggregations().get("agg1"); assertEquals(5, matrixStats.getFieldCount("num")); @@ -474,7 +474,7 @@ public void testSearchWithParentJoin() throws IOException { assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(3, searchResponse.getHits().totalHits); assertEquals(0, searchResponse.getHits().getHits().length); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); assertEquals(1, searchResponse.getAggregations().asList().size()); Terms terms = searchResponse.getAggregations().get("top-tags"); assertEquals(0, terms.getDocCountError()); @@ -513,7 +513,7 @@ public void testSearchWithSuggest() throws IOException { assertNull(searchResponse.getAggregations()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); assertEquals(0, searchResponse.getHits().totalHits); - assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); + assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); assertEquals(0, searchResponse.getHits().getHits().length); assertEquals(1, searchResponse.getSuggest().size()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java new file mode 100644 index 0000000000000..8631e18b8739b --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.documentation; + +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.protocol.xpack.graph.Connection; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.protocol.xpack.graph.Vertex; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; + +public class GraphDocumentationIT extends ESRestHighLevelClientTestCase { + + + @Before + public void indexDocuments() throws IOException { + // Create chain of doc IDs across indices 1->2->3 + Request doc1 = new Request(HttpPut.METHOD_NAME, "/index1/type/1"); + doc1.setJsonEntity("{ \"participants\":[1,2], \"text\":\"let's start projectx\", \"attachment_md5\":\"324FHDGHFDG4564\"}"); + client().performRequest(doc1); + + Request doc2 = new Request(HttpPut.METHOD_NAME, "/index2/type/2"); + doc2.setJsonEntity("{\"participants\":[2,3,4], \"text\":\"got something you both may be interested in\"}"); + client().performRequest(doc2); + + client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh")); + } + + @SuppressForbidden(reason = "system out is ok for a documentation example") + public void testExplore() throws Exception { + RestHighLevelClient client = highLevelClient(); + + + + // tag::x-pack-graph-explore-request + GraphExploreRequest request = new GraphExploreRequest(); + request.indices("index1", "index2"); + request.useSignificance(false); + TermQueryBuilder startingQuery = new TermQueryBuilder("text", "projectx"); + + Hop hop1 = request.createNextHop(startingQuery); // <1> + VertexRequest people = hop1.addVertexRequest("participants"); // <2> + people.minDocCount(1); + VertexRequest files = hop1.addVertexRequest("attachment_md5"); + files.minDocCount(1); + + Hop hop2 = request.createNextHop(null); // <3> + VertexRequest vr2 = hop2.addVertexRequest("participants"); + vr2.minDocCount(5); + + GraphExploreResponse exploreResponse = client.graph().explore(request, RequestOptions.DEFAULT); // <4> + // end::x-pack-graph-explore-request + + + // tag::x-pack-graph-explore-response + Collection v = exploreResponse.getVertices(); + Collection c = exploreResponse.getConnections(); + for (Vertex vertex : v) { + System.out.println(vertex.getField() + ":" + vertex.getTerm() + // <1> + " discovered at hop depth " + vertex.getHopDepth()); + } + for (Connection link : c) { + System.out.println(link.getFrom() + " -> " + link.getTo() // <2> + + " evidenced by " + link.getDocCount() + " docs"); + } + // end::x-pack-graph-explore-response + + + Collection initialVertices = exploreResponse.getVertices(); + + // tag::x-pack-graph-explore-expand + GraphExploreRequest expandRequest = new GraphExploreRequest(); + expandRequest.indices("index1", "index2"); + + + Hop expandHop1 = expandRequest.createNextHop(null); // <1> + VertexRequest fromPeople = expandHop1.addVertexRequest("participants"); // <2> + for (Vertex vertex : initialVertices) { + if (vertex.getField().equals("participants")) { + 
fromPeople.addInclude(vertex.getTerm(), 1f); + } + } + + Hop expandHop2 = expandRequest.createNextHop(null); + VertexRequest newPeople = expandHop2.addVertexRequest("participants"); // <3> + for (Vertex vertex : initialVertices) { + if (vertex.getField().equals("participants")) { + newPeople.addExclude(vertex.getTerm()); + } + } + + GraphExploreResponse expandResponse = client.graph().explore(expandRequest, RequestOptions.DEFAULT); + // end::x-pack-graph-explore-expand + + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index a77d8b43e5737..683f91dae2eb1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -20,13 +20,23 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.MachineLearningIT; +import org.elasticsearch.client.MlRestTestStateCleaner; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.protocol.xpack.ml.CloseJobRequest; +import org.elasticsearch.protocol.xpack.ml.CloseJobResponse; import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse; +import org.elasticsearch.protocol.xpack.ml.GetBucketsRequest; +import org.elasticsearch.protocol.xpack.ml.GetBucketsResponse; +import org.elasticsearch.protocol.xpack.ml.GetJobRequest; +import org.elasticsearch.protocol.xpack.ml.GetJobResponse; import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; import org.elasticsearch.protocol.xpack.ml.PutJobRequest; @@ -35,17 +45,29 @@ import org.elasticsearch.protocol.xpack.ml.job.config.DataDescription; import org.elasticsearch.protocol.xpack.ml.job.config.Detector; import org.elasticsearch.protocol.xpack.ml.job.config.Job; +import org.elasticsearch.protocol.xpack.ml.job.results.Bucket; +import org.elasticsearch.protocol.xpack.ml.job.util.PageParams; +import org.junit.After; +import java.io.IOException; import java.util.Collections; import java.util.Date; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { + @After + public void cleanUp() throws IOException { + new MlRestTestStateCleaner(logger, client()).clearMlMetadata(); + } + public void testCreateJob() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -124,6 +146,63 @@ public void onFailure(Exception e) { } } + public void testGetJob() throws Exception { + RestHighLevelClient client = highLevelClient(); + + String jobId = "get-machine-learning-job1"; + + Job job = 
MachineLearningIT.buildJob("get-machine-learning-job1"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + Job secondJob = MachineLearningIT.buildJob("get-machine-learning-job2"); + client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT); + + { + //tag::x-pack-ml-get-job-request + GetJobRequest request = new GetJobRequest("get-machine-learning-job1", "get-machine-learning-job*"); //<1> + request.setAllowNoJobs(true); //<2> + //end::x-pack-ml-get-job-request + + //tag::x-pack-ml-get-job-execute + GetJobResponse response = client.machineLearning().getJob(request, RequestOptions.DEFAULT); + long numberOfJobs = response.count(); //<1> + List jobs = response.jobs(); //<2> + //end::x-pack-ml-get-job-execute + + assertEquals(2, response.count()); + assertThat(response.jobs(), hasSize(2)); + assertThat(response.jobs().stream().map(Job::getId).collect(Collectors.toList()), + containsInAnyOrder(job.getId(), secondJob.getId())); + } + { + GetJobRequest request = new GetJobRequest("get-machine-learning-job1", "get-machine-learning-job*"); + + // tag::x-pack-ml-get-job-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(GetJobResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-job-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-job-execute-async + client.machineLearning().getJobAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-get-job-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testDeleteJob() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -221,4 +300,158 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testCloseJob() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + Job job = MachineLearningIT.buildJob("closing-my-first-machine-learning-job"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT); + + //tag::x-pack-ml-close-job-request + CloseJobRequest closeJobRequest = new CloseJobRequest("closing-my-first-machine-learning-job", "otherjobs*"); //<1> + closeJobRequest.setForce(false); //<2> + closeJobRequest.setAllowNoJobs(true); //<3> + closeJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); //<4> + //end::x-pack-ml-close-job-request + + //tag::x-pack-ml-close-job-execute + CloseJobResponse closeJobResponse = client.machineLearning().closeJob(closeJobRequest, RequestOptions.DEFAULT); + boolean isClosed = closeJobResponse.isClosed(); //<1> + //end::x-pack-ml-close-job-execute + + } + { + Job job = MachineLearningIT.buildJob("closing-my-second-machine-learning-job"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT); + + //tag::x-pack-ml-close-job-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(CloseJobResponse closeJobResponse) { + //<1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::x-pack-ml-close-job-listener + CloseJobRequest closeJobRequest = 
new CloseJobRequest("closing-my-second-machine-learning-job"); + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-close-job-execute-async + client.machineLearning().closeJobAsync(closeJobRequest, RequestOptions.DEFAULT, listener); //<1> + // end::x-pack-ml-close-job-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testGetBuckets() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + String jobId = "test-get-buckets"; + Job job = MachineLearningIT.buildJob(jobId); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + // Let us index a bucket + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc"); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source("{\"job_id\":\"test-get-buckets\", \"result_type\":\"bucket\", \"timestamp\": 1533081600000," + + "\"bucket_span\": 600,\"is_interim\": false, \"anomaly_score\": 80.0}", XContentType.JSON); + client.index(indexRequest, RequestOptions.DEFAULT); + + { + // tag::x-pack-ml-get-buckets-request + GetBucketsRequest request = new GetBucketsRequest(jobId); // <1> + // end::x-pack-ml-get-buckets-request + + // tag::x-pack-ml-get-buckets-timestamp + request.setTimestamp("2018-08-17T00:00:00Z"); // <1> + // end::x-pack-ml-get-buckets-timestamp + + // Set timestamp to null as it is incompatible with other args + request.setTimestamp(null); + + // tag::x-pack-ml-get-buckets-anomaly-score + request.setAnomalyScore(75.0); // <1> + // end::x-pack-ml-get-buckets-anomaly-score + + // tag::x-pack-ml-get-buckets-desc + request.setDescending(true); // <1> + // end::x-pack-ml-get-buckets-desc + + // tag::x-pack-ml-get-buckets-end + request.setEnd("2018-08-21T00:00:00Z"); // <1> + // end::x-pack-ml-get-buckets-end + + // tag::x-pack-ml-get-buckets-exclude-interim + request.setExcludeInterim(true); // <1> + // end::x-pack-ml-get-buckets-exclude-interim + + // tag::x-pack-ml-get-buckets-expand + request.setExpand(true); // <1> + // end::x-pack-ml-get-buckets-expand + + // tag::x-pack-ml-get-buckets-page + request.setPageParams(new PageParams(100, 200)); // <1> + // end::x-pack-ml-get-buckets-page + + // Set page params back to null so the response contains the bucket we indexed + request.setPageParams(null); + + // tag::x-pack-ml-get-buckets-sort + request.setSort("anomaly_score"); // <1> + // end::x-pack-ml-get-buckets-sort + + // tag::x-pack-ml-get-buckets-start + request.setStart("2018-08-01T00:00:00Z"); // <1> + // end::x-pack-ml-get-buckets-start + + // tag::x-pack-ml-get-buckets-execute + GetBucketsResponse response = client.machineLearning().getBuckets(request, RequestOptions.DEFAULT); + // end::x-pack-ml-get-buckets-execute + + // tag::x-pack-ml-get-buckets-response + long count = response.count(); // <1> + List buckets = response.buckets(); // <2> + // end::x-pack-ml-get-buckets-response + assertEquals(1, buckets.size()); + } + { + GetBucketsRequest request = new GetBucketsRequest(jobId); + + // tag::x-pack-ml-get-buckets-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(GetBucketsResponse getBucketsResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-buckets-listener + + // Replace the empty listener by a blocking listener 
in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-buckets-execute-async + client.machineLearning().getBucketsAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-get-buckets-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git a/client/rest/build.gradle b/client/rest/build.gradle index fc2ab0bc4c05d..273836a31f0cb 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -1,3 +1,5 @@ +import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -16,9 +18,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -53,10 +52,9 @@ dependencies { testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}" } -forbiddenApisMain { +tasks.withType(ForbiddenApisCliTask) { //client does not depend on server, so only jdk and http signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), - PrecommitTasks.getResource('/forbidden/http-signatures.txt')] + replaceSignatureFiles ('jdk-signatures', 'http-signatures') } forbiddenPatterns { @@ -67,9 +65,6 @@ forbiddenApisTest { //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage bundledSignatures -= 'jdk-non-portable' bundledSignatures += 'jdk-internal' - //client does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), - PrecommitTasks.getResource('/forbidden/http-signatures.txt')] } // JarHell is part of es server, which we don't want to pull in diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index 41146e0b7ec08..6ba69c5713c57 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -55,7 +52,7 @@ dependencies { forbiddenApisMain { //client does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } forbiddenApisTest { @@ -63,7 +60,7 @@ forbiddenApisTest { bundledSignatures -= 'jdk-non-portable' bundledSignatures += 'jdk-internal' //client does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } dependencyLicenses { diff --git a/client/test/build.gradle b/client/test/build.gradle index cc69a1828dc85..e66d2be57f1ea 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -16,10 +16,6 @@ * specific language governing permissions and limitations * under the License. 
*/ - -import org.elasticsearch.gradle.precommit.PrecommitTasks -import org.gradle.api.JavaVersion - apply plugin: 'elasticsearch.build' targetCompatibility = JavaVersion.VERSION_1_7 @@ -36,7 +32,7 @@ dependencies { forbiddenApisMain { //client does not depend on core, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } forbiddenApisTest { @@ -44,7 +40,7 @@ forbiddenApisTest { bundledSignatures -= 'jdk-non-portable' bundledSignatures += 'jdk-internal' //client does not depend on core, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } // JarHell is part of es server, which we don't want to pull in diff --git a/client/transport/build.gradle b/client/transport/build.gradle index 944a038edd97c..269a37105fb19 100644 --- a/client/transport/build.gradle +++ b/client/transport/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -47,8 +44,7 @@ dependencyLicenses { forbiddenApisTest { // we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to // be pulled in - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), - PrecommitTasks.getResource('/forbidden/es-all-signatures.txt')] + replaceSignatureFiles 'jdk-signatures', 'es-all-signatures' } namingConventions { diff --git a/distribution/tools/java-version-checker/build.gradle b/distribution/tools/java-version-checker/build.gradle index ad9b56fec0502..6d18b79d4bddf 100644 --- a/distribution/tools/java-version-checker/build.gradle +++ b/distribution/tools/java-version-checker/build.gradle @@ -1,11 +1,11 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' targetCompatibility = JavaVersion.VERSION_1_7 // java_version_checker do not depend on core so only JDK signatures should be checked -forbiddenApisMain.signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +forbiddenApisMain { + replaceSignatureFiles 'jdk-signatures' +} test.enabled = false namingConventions.enabled = false diff --git a/distribution/tools/launchers/build.gradle b/distribution/tools/launchers/build.gradle index a774691b2eb17..ca1aa6bcac9d6 100644 --- a/distribution/tools/launchers/build.gradle +++ b/distribution/tools/launchers/build.gradle @@ -17,8 +17,9 @@ * under the License. 
*/ -import org.elasticsearch.gradle.precommit.PrecommitTasks -import org.gradle.api.JavaVersion + + +import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask apply plugin: 'elasticsearch.build' @@ -31,10 +32,9 @@ dependencies { archivesBaseName = 'elasticsearch-launchers' -// java_version_checker do not depend on core so only JDK signatures should be checked -List jdkSignatures = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] -forbiddenApisMain.signaturesURLs = jdkSignatures -forbiddenApisTest.signaturesURLs = jdkSignatures +tasks.withType(ForbiddenApisCliTask) { + replaceSignatureFiles 'jdk-signatures' +} namingConventions { testClass = 'org.elasticsearch.tools.launchers.LaunchersTestCase' diff --git a/docs/java-rest/high-level/graph/explore.asciidoc b/docs/java-rest/high-level/graph/explore.asciidoc new file mode 100644 index 0000000000000..f2718209f4b90 --- /dev/null +++ b/docs/java-rest/high-level/graph/explore.asciidoc @@ -0,0 +1,53 @@ +[[java-rest-high-x-pack-graph-explore]] +=== X-Pack Graph explore API + +[[java-rest-high-x-pack-graph-explore-execution]] +==== Initial request + +Graph queries are executed using the `explore()` method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-request] +-------------------------------------------------- +<1> In this example we seed the exploration with a query to find messages mentioning the mysterious `projectx`. +<2> What we want to discover in these messages are the ids of `participants` in the communications and the md5 hashes +of any attached files. In each case, we want to find people or files that have had at least one document connecting them +to projectx. +<3> The next "hop" in the graph exploration is to find the people who have shared several messages with the people or files +discovered in the previous hop (the projectx conspirators). The `minDocCount` control is used here to ensure the people +discovered have had at least 5 communications with projectx entities. Note we could also supply a "guiding query" here, e.g. a +date range to consider only recent communications, but we pass null to consider all connections. +<4> Finally we call the graph explore API with the GraphExploreRequest object. + + +==== Response + +Graph responses consist of Vertex and Connection objects (also known as "nodes" and "edges" respectively): + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-response] +-------------------------------------------------- +<1> Each Vertex is a unique term (a combination of field name and term value). The "hopDepth" property tells us at which point in the +requested exploration this term was first discovered. +<2> Each Connection is a pair of Vertex objects and includes a docCount property telling us how many times these two +Vertex terms have been sighted together. + + +[[java-rest-high-x-pack-graph-expand-execution]] +==== Expanding a client-side Graph + +Typically, once an application has rendered an initial GraphExploreResponse as a collection of vertices and connecting lines (graph visualization toolkits such as D3, sigma.js, or Keylines help here), the next step a user may want to take is to "expand". This involves finding new vertices that might be connected to the existing ones currently shown.
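As a rough, hypothetical sketch (assuming the `getVertices()` and `getTerm()` accessors on the response types shown above), an application might first record the terms it has already rendered so they can feed the `addInclude` and `addExclude` filters used in the follow-up request:

["source","java"]
--------------------------------------------------
// Sketch only: collect the terms rendered from the initial response so the
// follow-up request can expand from them and avoid re-discovering them.
Set<String> alreadyShown = new HashSet<>();
for (Vertex vertex : exploreResponse.getVertices()) {
    alreadyShown.add(vertex.getTerm());
}
--------------------------------------------------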
+ +To do this we use the same `explore` method but our request contains details about which vertices to expand from and which vertices to avoid re-discovering. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-expand] +-------------------------------------------------- +<1> Unlike the initial request we do not need to pass a starting query +<2> In the first hop, which represents our "from" vertices, we explicitly list the terms that we already have on-screen and want to expand from, using the `addInclude` filter. +We can supply a boost for those terms that are considered more important to follow than others but here we select a common value of 1 for all. +<3> When defining the second hop, which represents the "to" vertices we hope to discover, we explicitly list the terms that we already know about using the `addExclude` filter. + diff --git a/docs/java-rest/high-level/ml/close-job.asciidoc b/docs/java-rest/high-level/ml/close-job.asciidoc new file mode 100644 index 0000000000000..edadb9f40a214 --- /dev/null +++ b/docs/java-rest/high-level/ml/close-job.asciidoc @@ -0,0 +1,59 @@ +[[java-rest-high-x-pack-ml-close-job]] +=== Close Job API + +The Close Job API provides the ability to close {ml} jobs in the cluster. +It accepts a `CloseJobRequest` object and responds +with a `CloseJobResponse` object. + +[[java-rest-high-x-pack-ml-close-job-request]] +==== Close Job Request + +A `CloseJobRequest` object is created with an existing, non-null `jobId`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-request] +-------------------------------------------------- +<1> Constructing a new request referencing existing job IDs +<2> Optionally used to close a failed job, or to forcefully close a job +which has not responded to its initial close request. +<3> Optionally set to ignore if a wildcard expression matches no jobs. + (This includes the `_all` string and the case where no jobs have been specified.) +<4> Optionally setting the `timeout` value for how long the +execution should wait for the job to be closed. + +[[java-rest-high-x-pack-ml-close-job-execution]] +==== Execution + +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearning()` method. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-execute] +-------------------------------------------------- +<1> `isClosed()` from the `CloseJobResponse` indicates whether the job was successfully +closed. + +[[java-rest-high-x-pack-ml-close-job-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-execute-async] +-------------------------------------------------- +<1> The `CloseJobRequest` to execute and the `ActionListener` to use when +the execution completes + +The method does not block and returns immediately. The passed `ActionListener` is used +to notify the caller of completion.
A typical `ActionListener` for `CloseJobResponse` may +look like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-close-job-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs diff --git a/docs/java-rest/high-level/ml/get-buckets.asciidoc b/docs/java-rest/high-level/ml/get-buckets.asciidoc new file mode 100644 index 0000000000000..81a21d3d18ac1 --- /dev/null +++ b/docs/java-rest/high-level/ml/get-buckets.asciidoc @@ -0,0 +1,125 @@ +[[java-rest-high-x-pack-ml-get-buckets]] +=== Get Buckets API + +The Get Buckets API retrieves one or more bucket results. +It accepts a `GetBucketsRequest` object and responds +with a `GetBucketsResponse` object. + +[[java-rest-high-x-pack-ml-get-buckets-request]] +==== Get Buckets Request + +A `GetBucketsRequest` object is created with an existing, non-null `jobId`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-request] +-------------------------------------------------- +<1> Constructing a new request referencing an existing `jobId` + +==== Optional Arguments +The following arguments are optional: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-timestamp] +-------------------------------------------------- +<1> The timestamp of the bucket to get. If not set, all buckets are returned. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-anomaly-score] +-------------------------------------------------- +<1> Buckets with anomaly scores greater than or equal to this value will be returned. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-desc] +-------------------------------------------------- +<1> If `true`, the buckets are sorted in descending order. Defaults to `false`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-end] +-------------------------------------------------- +<1> Buckets with timestamps earlier than this time will be returned. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-exclude-interim] +-------------------------------------------------- +<1> If `true`, interim results will be excluded. Defaults to `false`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-expand] +-------------------------------------------------- +<1> If `true`, buckets will include their anomaly records. Defaults to `false`.
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-page] +-------------------------------------------------- +<1> The page parameters `from` and `size`. `from` specifies the number of buckets to skip. +`size` specifies the maximum number of buckets to get. Defaults to `0` and `100` respectively. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-sort] +-------------------------------------------------- +<1> The field to sort buckets on. Defaults to `timestamp`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-end] +-------------------------------------------------- +<1> Buckets with timestamps on or after this time will be returned. + +[[java-rest-high-x-pack-ml-get-buckets-execution]] +==== Execution + +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-execute] +-------------------------------------------------- + + +[[java-rest-high-x-pack-ml-get-buckets-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-execute-async] +-------------------------------------------------- +<1> The `GetBucketsRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back with the `onResponse` method +if the execution is successful or the `onFailure` method if the execution +failed. 
+ +A typical listener for `GetBucketsResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs + +[[java-rest-high-snapshot-ml-get-buckets-response]] +==== Get Buckets Response + +The returned `GetBucketsResponse` contains the requested buckets: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-buckets-response] +-------------------------------------------------- +<1> The count of buckets that were matched +<2> The buckets retrieved \ No newline at end of file diff --git a/docs/java-rest/high-level/ml/get-job.asciidoc b/docs/java-rest/high-level/ml/get-job.asciidoc new file mode 100644 index 0000000000000..4ecf70e8e6538 --- /dev/null +++ b/docs/java-rest/high-level/ml/get-job.asciidoc @@ -0,0 +1,57 @@ +[[java-rest-high-x-pack-ml-get-job]] +=== Get Job API + +The Get Job API provides the ability to get {ml} jobs in the cluster. +It accepts a `GetJobRequest` object and responds +with a `GetJobResponse` object. + +[[java-rest-high-x-pack-ml-get-job-request]] +==== Get Job Request + +A `GetJobRequest` object can have any number of `jobId` or `groupName` +entries. However, they must all be non-null. An empty list is the same as +requesting all jobs. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-request] +-------------------------------------------------- +<1> Constructing a new request referencing existing `jobIds`; it can contain wildcards +<2> Whether to ignore if a wildcard expression matches no jobs. + (This includes the `_all` string and the case where no jobs have been specified.) + +[[java-rest-high-x-pack-ml-get-job-execution]] +==== Execution + +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearning()` method. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-execute] +-------------------------------------------------- +<1> `count()` from the `GetJobResponse` indicates the number of jobs found +<2> `jobs()` is the collection of {ml} `Job` objects found + +[[java-rest-high-x-pack-ml-get-job-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-execute-async] +-------------------------------------------------- +<1> The `GetJobRequest` to execute and the `ActionListener` to use when +the execution completes + +The method does not block and returns immediately. The passed `ActionListener` is used +to notify the caller of completion.
A typical `ActionListener` for `GetJobResponse` may +look like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs diff --git a/docs/java-rest/high-level/ml/open-job.asciidoc b/docs/java-rest/high-level/ml/open-job.asciidoc index ad575121818bc..be6a518df193f 100644 --- a/docs/java-rest/high-level/ml/open-job.asciidoc +++ b/docs/java-rest/high-level/ml/open-job.asciidoc @@ -44,7 +44,7 @@ include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-open-job-exec the execution completes The method does not block and returns immediately. The passed `ActionListener` is used -to notify the caller of completion. A typical `ActionListner` for `OpenJobResponse` may +to notify the caller of completion. A typical `ActionListener` for `OpenJobResponse` may look like ["source","java",subs="attributes,callouts,macros"] diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 6bcb736243a7c..e04e391f3e0b7 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -205,12 +205,18 @@ include::licensing/delete-license.asciidoc[] The Java High Level REST Client supports the following Machine Learning APIs: * <<java-rest-high-x-pack-ml-put-job>> +* <<java-rest-high-x-pack-ml-get-job>> * <<java-rest-high-x-pack-ml-delete-job>> * <<java-rest-high-x-pack-ml-open-job>> +* <<java-rest-high-x-pack-ml-close-job>> +* <<java-rest-high-x-pack-ml-get-buckets>> include::ml/put-job.asciidoc[] +include::ml/get-job.asciidoc[] include::ml/delete-job.asciidoc[] include::ml/open-job.asciidoc[] +include::ml/close-job.asciidoc[] +include::ml/get-buckets.asciidoc[] == Migration APIs @@ -229,3 +235,11 @@ The Java High Level REST Client supports the following Watcher APIs: include::watcher/put-watch.asciidoc[] include::watcher/delete-watch.asciidoc[] + +== Graph APIs + +The Java High Level REST Client supports the following Graph APIs: + +* <<java-rest-high-x-pack-graph-explore>> + +include::graph/explore.asciidoc[] diff --git a/docs/painless/painless-execute-script.asciidoc b/docs/painless/painless-execute-script.asciidoc index 2aca959778699..30320def79b2d 100644 --- a/docs/painless/painless-execute-script.asciidoc +++ b/docs/painless/painless-execute-script.asciidoc @@ -26,7 +26,7 @@ The only variable that is available is `params`, which can be used to access use The result of the script is always converted to a string. If no context is specified then this context is used by default. -====== Example +*Example* Request: @@ -67,7 +67,7 @@ The following parameters may be specified in `context_setup` for a filter contex document:: Contains the document that will be temporarily indexed in-memory and is accessible from the script. index:: The name of an index containing a mapping that is compatible with the document being indexed. -====== Example +*Example* [source,js] ---------------------------------------------------------------- @@ -125,7 +125,7 @@ document:: Contains the document that will be temporarily indexed in-memory and index:: The name of an index containing a mapping that is compatible with the document being indexed. query:: If `_score` is used in the script then a query can be specified that will be used to compute a score.
-====== Example +*Example* [source,js] ---------------------------------------------------------------- diff --git a/docs/reference/aggregations/bucket/children-aggregation.asciidoc b/docs/reference/aggregations/bucket/children-aggregation.asciidoc index 3805b2e564ca4..e2b3c8ec59176 100644 --- a/docs/reference/aggregations/bucket/children-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/children-aggregation.asciidoc @@ -144,7 +144,7 @@ Possible response: }, "hits": { "total": 3, - "max_score": 0.0, + "max_score": null, "hits": [] }, "aggregations": { diff --git a/docs/reference/cluster/remote-info.asciidoc b/docs/reference/cluster/remote-info.asciidoc index 3dfcc201e7ac4..2866d798b2814 100644 --- a/docs/reference/cluster/remote-info.asciidoc +++ b/docs/reference/cluster/remote-info.asciidoc @@ -25,7 +25,7 @@ the configured remote cluster alias. `num_nodes_connected`:: The number of connected nodes in the remote cluster. -`max_connection_per_cluster`:: +`max_connections_per_cluster`:: The maximum number of connections maintained for the remote cluster. `initial_connect_timeout`:: diff --git a/docs/reference/docs/termvectors.asciidoc b/docs/reference/docs/termvectors.asciidoc index 3cd21b21df4d6..0e6078ad7b231 100644 --- a/docs/reference/docs/termvectors.asciidoc +++ b/docs/reference/docs/termvectors.asciidoc @@ -30,6 +30,10 @@ in similar way to the <> [WARNING] Note that the usage of `/_termvector` is deprecated in 2.0, and replaced by `/_termvectors`. +[WARNING] +The Term Vectors API doesn't work on nested fields. `/_termvectors` on a nested +field or any sub-field of a nested field returns empty results. + [float] === Return values diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 8229f74bdd05b..c69597e74fd61 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -1141,7 +1141,7 @@ And the response (partially shown): }, "hits" : { "total" : 1000, - "max_score" : 0.0, + "max_score" : null, "hits" : [ ] }, "aggregations" : { diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc index a1e00bac61649..c2b3d700e9b7c 100644 --- a/docs/reference/index-modules/store.asciidoc +++ b/docs/reference/index-modules/store.asciidoc @@ -67,6 +67,13 @@ process equal to the size of the file being mapped. Before using this class, be sure you have allowed plenty of <>. +[[allow-mmapfs]] +You can restrict the use of the `mmapfs` store type via the setting +`node.store.allow_mmapfs`. This is a boolean setting indicating whether or not +`mmapfs` is allowed. The default is to allow `mmapfs`. This setting is useful, +for example, if you are in an environment where you cannot control the ability +to create a lot of memory maps, so you need to disable the ability to use `mmapfs`. + === Pre-loading data into the file system cache NOTE: This is an expert setting, the details of which may change in the future. diff --git a/docs/reference/mapping/params/ignore-above.asciidoc b/docs/reference/mapping/params/ignore-above.asciidoc index 95704c6c8bbe3..fe7c6881a064f 100644 --- a/docs/reference/mapping/params/ignore-above.asciidoc +++ b/docs/reference/mapping/params/ignore-above.asciidoc @@ -2,6 +2,9 @@ === `ignore_above` Strings longer than the `ignore_above` setting will not be indexed or stored. +For arrays of strings, `ignore_above` will be applied to each array element separately, and string elements longer than `ignore_above` will not be indexed or stored.
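As a hedged illustration of this per-element behaviour using the Java high-level client (the index name, type, and `tags` field below are hypothetical, with `ignore_above` set to `10`):

["source","java"]
--------------------------------------------------
// Sketch only: ignore_above is applied to each array element separately.
CreateIndexRequest create = new CreateIndexRequest("my-index");
create.mapping("_doc",
        "{\"properties\":{\"tags\":{\"type\":\"keyword\",\"ignore_above\":10}}}",
        XContentType.JSON);
client.indices().create(create, RequestOptions.DEFAULT);

// "short" is indexed; the second element exceeds ignore_above and is not
// indexed, but both elements are still returned with the document.
IndexRequest doc = new IndexRequest("my-index", "_doc", "1")
        .source("{\"tags\":[\"short\",\"this element is longer than ten characters\"]}",
                XContentType.JSON);
client.index(doc, RequestOptions.DEFAULT);
--------------------------------------------------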
+ + +NOTE: All strings/array elements will still be present in the `_source` field, if the latter is enabled, which is the default in Elasticsearch. [source,js] -------------------------------------------------- diff --git a/docs/reference/mapping/params/normalizer.asciidoc b/docs/reference/mapping/params/normalizer.asciidoc index 3688a0e945414..73110cd11f5af 100644 --- a/docs/reference/mapping/params/normalizer.asciidoc +++ b/docs/reference/mapping/params/normalizer.asciidoc @@ -151,7 +151,7 @@ returns }, "hits": { "total": 3, - "max_score": 0.0, + "max_score": null, "hits": [] }, "aggregations": { diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc index 094294d85304d..76367115e1302 100644 --- a/docs/reference/migration/migrate_7_0/search.asciidoc +++ b/docs/reference/migration/migrate_7_0/search.asciidoc @@ -100,3 +100,8 @@ and the context is only accepted if `path` points to a field with `geo_point` ty `max_concurrent_shard_requests` used to limit the total number of concurrent shard requests a single high level search request can execute. In 7.0 this changed to be the max number of concurrent shard requests per node. The default is now `5`. + +==== `max_score` set to `null` when scores are not tracked + +`max_score` used to be set to `0` whenever scores are not tracked. `null` is now used +instead, which is a more appropriate value for a scenario where scores are not available. diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index d67d8a733ac00..b88c7bf4547b4 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -531,3 +531,16 @@ native realm: * <>, <> * <> * <> + +[role="exclude",id="security-api-role-mapping"] +=== Role mapping APIs + +You can use the following APIs to add, remove, and retrieve role mappings: + +* <>, <> +* <> + +[role="exclude",id="security-api-privileges"] +=== Privilege APIs + +See <>. diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc index c68cf0daaf55a..8771915dee696 100644 --- a/docs/reference/search/multi-search.asciidoc +++ b/docs/reference/search/multi-search.asciidoc @@ -86,6 +86,16 @@ The msearch's `max_concurrent_searches` request parameter can be used to control the maximum number of concurrent searches the multi search api will execute. This default is based on the number of data nodes and the default search thread pool size. +The request parameter `max_concurrent_shard_requests` can be used to control the +maximum number of concurrent shard requests that each sub-search request will execute. +This parameter should be used to protect a single request from overloading a cluster +(e.g., a default request will hit all indices in a cluster which could cause shard request rejections +if the number of shards per node is high). This default is based on the number of +data nodes in the cluster but at most `256`. In certain scenarios parallelism isn't achieved through +concurrent requests, so this protection can result in poor performance. For +instance, in an environment where only a very low number of concurrent search requests is expected, +it might help to increase this value to a higher number.
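As a hedged sketch with the Java high-level REST client (the index patterns and the value `12` are illustrative only):

["source","java"]
--------------------------------------------------
// Sketch only: cap the concurrent shard requests each sub-search may issue.
MultiSearchRequest request = new MultiSearchRequest();
request.add(new SearchRequest("logs-*"));
request.add(new SearchRequest("metrics-*"));
request.maxConcurrentShardRequests(12);
MultiSearchResponse response = client.multiSearch(request, RequestOptions.DEFAULT);
--------------------------------------------------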
+ [float] [[msearch-security]] === Security diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc index e7c9b593af372..ad24d9c93c6b6 100644 --- a/docs/reference/search/request-body.asciidoc +++ b/docs/reference/search/request-body.asciidoc @@ -161,7 +161,7 @@ be set to `true` in the response. }, "hits": { "total": 1, - "max_score": 0.0, + "max_score": null, "hits": [] } } diff --git a/docs/reference/search/request/docvalue-fields.asciidoc b/docs/reference/search/request/docvalue-fields.asciidoc index fa5baf1db2262..bcfcb20d1d53b 100644 --- a/docs/reference/search/request/docvalue-fields.asciidoc +++ b/docs/reference/search/request/docvalue-fields.asciidoc @@ -30,6 +30,27 @@ GET /_search Doc value fields can work on fields that are not stored. +`*` can be used as a wildcard, for example: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query" : { + "match_all": {} + }, + "docvalue_fields" : [ + { + "field": "*field", <1> + "format": "use_field_mapping" <2> + } + ] +} +-------------------------------------------------- +// CONSOLE +<1> Match all fields ending with `field` +<2> Format to be applied to all matching fields. + Note that if the fields parameter specifies fields without docvalues it will try to load the value from the fielddata cache causing the terms for that field to be loaded to memory (cached), which will result in more memory consumption. diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 9f9833bde9d5c..c52f28bc7bea4 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -258,7 +258,7 @@ Which should look like: }, "hits": { "total" : 0, - "max_score" : 0.0, + "max_score" : null, "hits" : [] }, "suggest": { diff --git a/docs/reference/settings/security-hash-settings.asciidoc b/docs/reference/settings/security-hash-settings.asciidoc new file mode 100644 index 0000000000000..061ca38d545c7 --- /dev/null +++ b/docs/reference/settings/security-hash-settings.asciidoc @@ -0,0 +1,84 @@ +[float] +[[hashing-settings]] +==== User cache and password hash algorithms + +Certain realms store user credentials in memory. To limit exposure +to credential theft and mitigate credential compromise, the cache only stores +a hashed version of the user credentials in memory. By default, the user cache +is hashed with a salted `sha-256` hash algorithm. You can use a different +hashing algorithm by setting the `cache.hash_algo` realm setting to any of the +following values: + +[[cache-hash-algo]] +.Cache hash algorithms +|======================= +| Algorithm | | | Description +| `ssha256` | | | Uses a salted `sha-256` algorithm (default). +| `md5` | | | Uses `MD5` algorithm. +| `sha1` | | | Uses `SHA1` algorithm. +| `bcrypt` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds. +| `bcrypt4` | | | Uses `bcrypt` algorithm with salt generated in 16 rounds. +| `bcrypt5` | | | Uses `bcrypt` algorithm with salt generated in 32 rounds. +| `bcrypt6` | | | Uses `bcrypt` algorithm with salt generated in 64 rounds. +| `bcrypt7` | | | Uses `bcrypt` algorithm with salt generated in 128 rounds. +| `bcrypt8` | | | Uses `bcrypt` algorithm with salt generated in 256 rounds. +| `bcrypt9` | | | Uses `bcrypt` algorithm with salt generated in 512 rounds.
+| `pbkdf2` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 10000 iterations. +| `pbkdf2_1000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 1000 iterations. +| `pbkdf2_10000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 10000 iterations. +| `pbkdf2_50000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 50000 iterations. +| `pbkdf2_100000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 100000 iterations. +| `pbkdf2_500000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 500000 iterations. +| `pbkdf2_1000000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 1000000 iterations. +| `noop`,`clear_text` | | | Doesn't hash the credentials and keeps them in clear text in + memory. CAUTION: keeping clear text is considered insecure + and can be compromised at the OS level (for example through + memory dumps and using `ptrace`). +|======================= + +Likewise, realms that store passwords hash them using cryptographically strong +and password-specific salt values. You can configure the algorithm for password +hashing by setting the `xpack.security.authc.password_hashing.algorithm` setting +to one of the following: + +[[password-hashing-algorithms]] +.Password hashing algorithms +|======================= +| Algorithm | | | Description + +| `bcrypt` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds. (default) +| `bcrypt4` | | | Uses `bcrypt` algorithm with salt generated in 16 rounds. +| `bcrypt5` | | | Uses `bcrypt` algorithm with salt generated in 32 rounds. +| `bcrypt6` | | | Uses `bcrypt` algorithm with salt generated in 64 rounds. +| `bcrypt7` | | | Uses `bcrypt` algorithm with salt generated in 128 rounds. +| `bcrypt8` | | | Uses `bcrypt` algorithm with salt generated in 256 rounds. +| `bcrypt9` | | | Uses `bcrypt` algorithm with salt generated in 512 rounds. +| `bcrypt10` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds. +| `bcrypt11` | | | Uses `bcrypt` algorithm with salt generated in 2048 rounds. +| `bcrypt12` | | | Uses `bcrypt` algorithm with salt generated in 4096 rounds. +| `bcrypt13` | | | Uses `bcrypt` algorithm with salt generated in 8192 rounds. +| `bcrypt14` | | | Uses `bcrypt` algorithm with salt generated in 16384 rounds. +| `pbkdf2` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 10000 iterations. +| `pbkdf2_1000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 1000 iterations. +| `pbkdf2_10000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 10000 iterations. +| `pbkdf2_50000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 50000 iterations. +| `pbkdf2_100000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 100000 iterations. +| `pbkdf2_500000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 500000 iterations. +| `pbkdf2_1000000` | | | Uses `PBKDF2` key derivation function with `HMAC-SHA512` as a + pseudorandom function using 1000000 iterations.
+|======================= + + diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 9aa4483a8f200..f1d8b555d562a 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -46,12 +46,21 @@ settings for the ad1 realm: `xpack.security.authc.realms.ad1.*`. The API already omits all `ssl` settings, `bind_dn`, and `bind_password` due to the sensitive nature of the information. +`xpack.security.fips_mode.enabled`:: +Enables FIPS mode of operation. Set this to `true` if you run this {es} instance in a FIPS 140-2 enabled JVM. For more information, see <>. Defaults to `false`. + [float] [[password-security-settings]] ==== Default password security settings `xpack.security.authc.accept_default_password`:: In `elasticsearch.yml`, set this to `false` to disable support for the default "changeme" password. +[[password-hashing-settings]] +==== Password hashing settings +`xpack.security.authc.password_hashing.algorithm`:: +Specifies the hashing algorithm that is used for secure user credential storage. +See <<password-hashing-algorithms>>. Defaults to `bcrypt`. + [float] [[anonymous-access-settings]] ==== Anonymous access settings @@ -164,9 +173,8 @@ the standard {es} <>. Defaults to `20m`. cache at any given time. Defaults to 100,000. `cache.hash_algo`:: (Expert Setting) The hashing algorithm that is used for the -in-memory cached user credentials. For possible values, see -{xpack-ref}/controlling-user-cache.html[Cache hash algorithms]. Defaults to -`ssha256`. +in-memory cached user credentials. For possible values, see <<cache-hash-algo>>. +Defaults to `ssha256`. [[ref-users-settings]] @@ -190,8 +198,7 @@ Defaults to 100,000. `cache.hash_algo`:: (Expert Setting) The hashing algorithm that is used for the in-memory cached -user credentials. See the {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms] table for -all possible values. Defaults to `ssha256`. +user credentials. See <<cache-hash-algo>>. Defaults to `ssha256`. [[ref-ldap-settings]] [float] @@ -444,8 +451,7 @@ Defaults to `100000`. `cache.hash_algo`:: (Expert Setting) Specifies the hashing algorithm that is used for the -in-memory cached user credentials. See {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms] -table for all possible values. Defaults to `ssha256`. +in-memory cached user credentials. See <<cache-hash-algo>>. Defaults to `ssha256`. [[ref-ad-settings]] [float] @@ -684,7 +690,7 @@ Defaults to `100000`. `cache.hash_algo`:: (Expert Setting) Specifies the hashing algorithm that is used for -the in-memory cached user credentials (see {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms] table for all possible values). Defaults to `ssha256`. +the in-memory cached user credentials. See <<cache-hash-algo>>. Defaults to `ssha256`. `follow_referrals`:: If set to `true` {security} follows referrals returned by the LDAP server. @@ -855,6 +861,15 @@ The maximum amount of skew that can be tolerated between the IdP's clock and the {es} node's clock. Defaults to `3m` (3 minutes). +`req_authn_context_class_ref`:: +A comma-separated list of Authentication Context Class Reference values to be +included in the Requested Authentication Context when requesting the IdP to +authenticate the current user. The Authentication Context of the corresponding +authentication response should contain at least one of the requested values.
++ +For more information, see +{stack-ov}/saml-guide-authentication.html#req-authn-context[Requesting specific authentication methods]. + [float] [[ref-saml-signing-settings]] ===== SAML realm signing settings @@ -1121,7 +1136,12 @@ settings such as those for HTTP or Transport. `xpack.ssl.supported_protocols`:: Supported protocols with versions. Valid protocols: `SSLv2Hello`, `SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`, -`TLSv1`. +`TLSv1`. ++ +-- +NOTE: If `xpack.security.fips_mode.enabled` is `true`, you cannot use `SSLv2Hello` +or `SSLv3`. See <>. +-- `xpack.ssl.client_authentication`:: Controls the server's behavior in regard to requesting a certificate @@ -1220,6 +1240,9 @@ Password to the truststore. `xpack.ssl.truststore.secure_password` (<>):: Password to the truststore. +WARNING: If `xpack.security.fips_mode.enabled` is `true`, you cannot use Java +keystore files. See <>. + [float] ===== PKCS#12 files @@ -1258,6 +1281,9 @@ Password to the truststore. `xpack.ssl.truststore.secure_password` (<>):: Password to the truststore. +WARNING: If `xpack.security.fips_mode.enabled` is `true`, you cannot use PKCS#12 +keystore files. See <>. + [[pkcs12-truststore-note]] [NOTE] Storing trusted certificates in a PKCS#12 file, although supported, is @@ -1335,3 +1361,5 @@ List of IP addresses to allow for this profile. `transport.profiles.$PROFILE.xpack.security.filter.deny`:: List of IP addresses to deny for this profile. + +include::security-hash-settings.asciidoc[] \ No newline at end of file diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index a8b8dd82d6172..f0e5cfc71c999 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -155,6 +155,11 @@ the kernel allows a process to have at least 262,144 memory-mapped areas and is enforced on Linux only. To pass the maximum map count check, you must configure `vm.max_map_count` via `sysctl` to be at least `262144`. +Alternatively, the maximum map count check is only needed if you are using +`mmapfs` as the <<index-modules-store,store type>> for your indices. If you +<<allow-mmapfs,restrict>> the use of `mmapfs` then this bootstrap check will +not be enforced. + === Client JVM check There are two different JVMs provided by OpenJDK-derived JVMs: the diff --git a/docs/reference/setup/install.asciidoc b/docs/reference/setup/install.asciidoc index c0ebfb60fa7b2..26a207824af01 100644 --- a/docs/reference/setup/install.asciidoc +++ b/docs/reference/setup/install.asciidoc @@ -41,6 +41,8 @@ Elasticsearch website or from our RPM repository. `msi`:: +beta[] ++ The `msi` package is suitable for installation on Windows 64-bit systems with at least .NET 4.5 framework installed, and is the easiest choice for getting started with Elasticsearch on Windows. MSIs may be downloaded from the Elasticsearch website.
diff --git a/docs/reference/setup/secure-settings.asciidoc b/docs/reference/setup/secure-settings.asciidoc index 2177440457acf..6abf5dea14d0e 100644 --- a/docs/reference/setup/secure-settings.asciidoc +++ b/docs/reference/setup/secure-settings.asciidoc @@ -91,9 +91,6 @@ using the `bin/elasticsearch-keystore add` command, call: [source,js] ---- POST _nodes/reload_secure_settings -{ - "secure_settings_password": "" -} ---- // CONSOLE This API will decrypt and re-read the entire keystore, on every cluster node, diff --git a/libs/cli/build.gradle b/libs/cli/build.gradle index 00d6d96ef0d59..b1f3b338255c4 100644 --- a/libs/cli/build.gradle +++ b/libs/cli/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.optional-base' apply plugin: 'nebula.maven-base-publish' @@ -34,5 +31,5 @@ test.enabled = false jarHell.enabled = false forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } diff --git a/libs/core/build.gradle b/libs/core/build.gradle index 2017c2a418ac4..cc5c1e20fc162 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -91,7 +89,7 @@ dependencies { forbiddenApisMain { // :libs:core does not depend on server // TODO: Need to decide how we want to handle for forbidden signatures with the changes to server - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/libs/dissect/build.gradle b/libs/dissect/build.gradle index c09a2a4ebd1b3..853c78646c25b 100644 --- a/libs/dissect/build.gradle +++ b/libs/dissect/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -33,7 +31,7 @@ dependencies { } forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index 61437be6aff13..37b494624eddb 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -34,7 +32,7 @@ dependencies { } forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/libs/nio/build.gradle b/libs/nio/build.gradle index 43c9a133a3f37..f6a6ff652450f 100644 --- a/libs/nio/build.gradle +++ b/libs/nio/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. 
*/ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -62,5 +59,5 @@ if (isEclipse) { forbiddenApisMain { // nio does not depend on core, so only jdk signatures should be checked // es-all is not checked as we connect and accept sockets - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } diff --git a/libs/secure-sm/build.gradle b/libs/secure-sm/build.gradle index 93fdfd01c8f0c..3baf3513b1206 100644 --- a/libs/secure-sm/build.gradle +++ b/libs/secure-sm/build.gradle @@ -16,9 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -47,7 +44,7 @@ dependencies { } forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/libs/x-content/build.gradle b/libs/x-content/build.gradle index c8b37108ff93c..0ec4e0d6ad312 100644 --- a/libs/x-content/build.gradle +++ b/libs/x-content/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -57,7 +55,7 @@ dependencies { forbiddenApisMain { // x-content does not depend on server // TODO: Need to decide how we want to handle for forbidden signatures with the changes to core - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } if (isEclipse) { diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java index 1d2b8a36810eb..b5dc23fbdb893 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java @@ -64,7 +64,7 @@ public void testNGramDeprecationWarning() throws IOException { public void testNGramNoDeprecationWarningPre6_4() throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(IndexMetaData.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_3_0)) + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_3_0)) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); @@ -104,7 +104,7 @@ public void testEdgeNGramDeprecationWarning() throws IOException { public void testEdgeNGramNoDeprecationWarningPre6_4() throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(IndexMetaData.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_3_0)) + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_3_0)) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java 
b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java index 0d5389a6d6594..e284877978851 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java @@ -60,7 +60,7 @@ public void testDeprecationWarning() throws IOException { public void testNoDeprecationWarningPre6_3() throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(IndexMetaData.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_2_4)) + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_2_4)) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java index caa9fa4831add..eea9e31d4a79d 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.script.mustache; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; @@ -120,21 +119,17 @@ public MultiSearchTemplateRequest indicesOptions(IndicesOptions indicesOptions) @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - maxConcurrentSearchRequests = in.readVInt(); - } + maxConcurrentSearchRequests = in.readVInt(); requests = in.readStreamableList(SearchTemplateRequest::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeVInt(maxConcurrentSearchRequests); - } + out.writeVInt(maxConcurrentSearchRequests); out.writeStreamableList(requests); } - + @Override public boolean equals(Object o) { if (this == o) return true; @@ -148,9 +143,9 @@ public boolean equals(Object o) { @Override public int hashCode() { return Objects.hash(maxConcurrentSearchRequests, requests, indicesOptions); - } - - public static byte[] writeMultiLineFormat(MultiSearchTemplateRequest multiSearchTemplateRequest, + } + + public static byte[] writeMultiLineFormat(MultiSearchTemplateRequest multiSearchTemplateRequest, XContent xContent) throws IOException { ByteArrayOutputStream output = new ByteArrayOutputStream(); for (SearchTemplateRequest templateRequest : multiSearchTemplateRequest.requests()) { @@ -168,5 +163,5 @@ public static byte[] writeMultiLineFormat(MultiSearchTemplateRequest multiSearch } return output.toByteArray(); } - + } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java index 3381356da4171..e37a796009137 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java @@ 
-27,7 +27,6 @@ import org.apache.lucene.search.join.JoinUtil; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -125,15 +124,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeInt(maxChildren); out.writeVInt(scoreMode.ordinal()); out.writeNamedWriteable(query); - if (out.getVersion().before(Version.V_5_5_0)) { - final boolean hasInnerHit = innerHitBuilder != null; - out.writeBoolean(hasInnerHit); - if (hasInnerHit) { - innerHitBuilder.writeToParentChildBWC(out, query, type); - } - } else { - out.writeOptionalWriteable(innerHitBuilder); - } + out.writeOptionalWriteable(innerHitBuilder); out.writeBoolean(ignoreUnmapped); } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java index 4e328ea2c984e..e98fdb9e9699d 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java @@ -21,7 +21,6 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.ScoreMode; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -97,15 +96,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(type); out.writeBoolean(score); out.writeNamedWriteable(query); - if (out.getVersion().before(Version.V_5_5_0)) { - final boolean hasInnerHit = innerHitBuilder != null; - out.writeBoolean(hasInnerHit); - if (hasInnerHit) { - innerHitBuilder.writeToParentChildBWC(out, query, type); - } - } else { - out.writeOptionalWriteable(innerHitBuilder); - } + out.writeOptionalWriteable(innerHitBuilder); out.writeBoolean(ignoreUnmapped); } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java index 6593c7efb9fab..5e57a2774055d 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java @@ -131,7 +131,7 @@ public TopDocs[] topDocs(SearchHit[] hits) throws IOException { for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx); } - result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0); + result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, Float.NaN); } else { int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); TopDocsCollector topDocsCollector; diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 546677a2be4f4..6e4e79d16e5a5 100644 --- 
a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -196,10 +196,6 @@ protected void doAssertLuceneQuery(HasChildQueryBuilder queryBuilder, Query quer public void testSerializationBWC() throws IOException { for (Version version : VersionUtils.allReleasedVersions()) { HasChildQueryBuilder testQuery = createTestQueryBuilder(); - if (version.before(Version.V_5_2_0) && testQuery.innerHit() != null) { - // ignore unmapped for inner_hits has been added on 5.2 - testQuery.innerHit().setIgnoreUnmapped(false); - } assertSerialization(testQuery, version); } } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java index 6d6822007eee3..164405f653444 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java @@ -171,10 +171,6 @@ protected void doAssertLuceneQuery(HasParentQueryBuilder queryBuilder, Query que public void testSerializationBWC() throws IOException { for (Version version : VersionUtils.allReleasedVersions()) { HasParentQueryBuilder testQuery = createTestQueryBuilder(); - if (version.before(Version.V_5_2_0) && testQuery.innerHit() != null) { - // ignore unmapped for inner_hits has been added on 5.2 - testQuery.innerHit().setIgnoreUnmapped(false); - } assertSerialization(testQuery, version); } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index f18efe4585bc9..445076b8eba07 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -272,11 +272,7 @@ public PercolateQueryBuilder(String field, String documentType, String indexedDo documents = document != null ? Collections.singletonList(document) : Collections.emptyList(); } if (documents.isEmpty() == false) { - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - documentXContentType = in.readEnum(XContentType.class); - } else { - documentXContentType = XContentHelper.xContentType(documents.iterator().next()); - } + documentXContentType = in.readEnum(XContentType.class); } else { documentXContentType = null; } @@ -329,7 +325,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { BytesReference doc = documents.isEmpty() ? 
null : documents.iterator().next(); out.writeOptionalBytesReference(doc); } - if (documents.isEmpty() == false && out.getVersion().onOrAfter(Version.V_5_3_0)) { + if (documents.isEmpty() == false) { out.writeEnum(documentXContentType); } } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index e7163edef94c9..eb7af5f30d061 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; @@ -36,7 +35,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -57,7 +55,6 @@ import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Base64; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -294,26 +291,6 @@ public void testCreateMultiDocumentSearcher() throws Exception { assertThat(result.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.MUST_NOT)); } - public void testSerializationBwc() throws IOException { - final byte[] data = Base64.getDecoder().decode("P4AAAAAFZmllbGQEdHlwZQAAAAAAAA57ImZvbyI6ImJhciJ9AAAAAA=="); - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - PercolateQueryBuilder queryBuilder = new PercolateQueryBuilder(in); - assertEquals("type", queryBuilder.getDocumentType()); - assertEquals("field", queryBuilder.getField()); - assertEquals("{\"foo\":\"bar\"}", queryBuilder.getDocuments().iterator().next().utf8ToString()); - assertEquals(XContentType.JSON, queryBuilder.getXContentType()); - - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(version); - queryBuilder.writeTo(out); - assertArrayEquals(data, out.bytes().toBytesRef().bytes); - } - } - } - private static BytesReference randomSource(Set usedFields) { try { // If we create two source that have the same field, but these fields have different kind of values (str vs. 
lng) then diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java index 5e97eadae83e7..1c7ae3681ac63 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java @@ -74,7 +74,7 @@ public void testStoringQueryBuilders() throws IOException { BinaryFieldMapper fieldMapper = PercolatorFieldMapper.Builder.createQueryBuilderFieldBuilder( new Mapper.BuilderContext(settings, new ContentPath(0))); - Version version = randomBoolean() ? Version.V_5_6_0 : Version.V_6_0_0_beta2; + Version version = Version.V_6_0_0_beta2; try (IndexWriter indexWriter = new IndexWriter(directory, config)) { for (int i = 0; i < queryBuilders.length; i++) { queryBuilders[i] = new TermQueryBuilder(randomAlphaOfLength(4), randomAlphaOfLength(8)); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java index e8e3760882eea..d20be74798066 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java @@ -61,7 +61,8 @@ static Request initialSearch(SearchRequest searchRequest, BytesReference query, if (searchRequest.scroll() != null) { TimeValue keepAlive = searchRequest.scroll().keepAlive(); - if (remoteVersion.before(Version.V_5_0_0)) { + // V_5_0_0 + if (remoteVersion.before(Version.fromId(5000099))) { /* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros * so we toss out that resolution, rounding up because more scroll * timeout seems safer than less. */ @@ -117,7 +118,8 @@ static Request initialSearch(SearchRequest searchRequest, BytesReference query, for (int i = 1; i < searchRequest.source().storedFields().fieldNames().size(); i++) { fields.append(',').append(searchRequest.source().storedFields().fieldNames().get(i)); } - String storedFieldsParamName = remoteVersion.before(Version.V_5_0_0_alpha4) ? "fields" : "stored_fields"; + // V_5_0_0 + String storedFieldsParamName = remoteVersion.before(Version.fromId(5000099)) ? "fields" : "stored_fields"; request.addParameter(storedFieldsParamName, fields.toString()); } @@ -186,7 +188,8 @@ private static String sortToUri(SortBuilder sort) { static Request scroll(String scroll, TimeValue keepAlive, Version remoteVersion) { Request request = new Request("POST", "/_search/scroll"); - if (remoteVersion.before(Version.V_5_0_0)) { + // V_5_0_0 + if (remoteVersion.before(Version.fromId(5000099))) { /* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros * so we toss out that resolution, rounding up so we shouldn't end up * with 0s. 
*/ diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java index 97809c9bc8dc3..0efedf449b562 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java @@ -155,13 +155,8 @@ private void assertRequestEquals(Version version, ReindexRequest request, Reinde assertEquals(request.getRemoteInfo().getUsername(), tripped.getRemoteInfo().getUsername()); assertEquals(request.getRemoteInfo().getPassword(), tripped.getRemoteInfo().getPassword()); assertEquals(request.getRemoteInfo().getHeaders(), tripped.getRemoteInfo().getHeaders()); - if (version.onOrAfter(Version.V_5_2_0)) { - assertEquals(request.getRemoteInfo().getSocketTimeout(), tripped.getRemoteInfo().getSocketTimeout()); - assertEquals(request.getRemoteInfo().getConnectTimeout(), tripped.getRemoteInfo().getConnectTimeout()); - } else { - assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, tripped.getRemoteInfo().getSocketTimeout()); - assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, tripped.getRemoteInfo().getConnectTimeout()); - } + assertEquals(request.getRemoteInfo().getSocketTimeout(), tripped.getRemoteInfo().getSocketTimeout()); + assertEquals(request.getRemoteInfo().getConnectTimeout(), tripped.getRemoteInfo().getConnectTimeout()); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index b51525f20e3c2..2f801811327b8 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -136,13 +136,15 @@ public void testInitialSearchParamsFields() { // Test stored_fields for versions that support it searchRequest = new SearchRequest().source(new SearchSourceBuilder()); searchRequest.source().storedField("_source").storedField("_id"); - remoteVersion = Version.fromId(between(Version.V_5_0_0_alpha4_ID, Version.CURRENT.id)); + // V_5_0_0_alpha4 => current + remoteVersion = Version.fromId(between(5000004, Version.CURRENT.id)); assertThat(initialSearch(searchRequest, query, remoteVersion).getParameters(), hasEntry("stored_fields", "_source,_id")); // Test fields for versions that support it searchRequest = new SearchRequest().source(new SearchSourceBuilder()); searchRequest.source().storedField("_source").storedField("_id"); - remoteVersion = Version.fromId(between(2000099, Version.V_5_0_0_alpha4_ID - 1)); + // V_2_0_0 => V_5_0_0_alpha3 + remoteVersion = Version.fromId(between(2000099, 5000003)); assertThat(initialSearch(searchRequest, query, remoteVersion).getParameters(), hasEntry("fields", "_source,_id")); // Test extra fields for versions that need it @@ -190,7 +192,8 @@ public void testInitialSearchParamsMisc() { } private void assertScroll(Version remoteVersion, Map<String, String> params, TimeValue requested) { - if (remoteVersion.before(Version.V_5_0_0)) { + // V_5_0_0 + if (remoteVersion.before(Version.fromId(5000099))) { // Versions of Elasticsearch prior to 5.0 can't parse nanos or micros in TimeValue.
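// Side note (an assumption inferred from the constants used throughout this change
// set, not stated in this file): the numeric ids passed to Version.fromId seem to
// decompose as
//   id = major * 1_000_000 + minor * 10_000 + revision * 100 + build
// where build 99 marks a GA release and alphaN uses build N, so 5000099 is 5.0.0
// (the old V_5_0_0 constant), 5000003 is 5.0.0-alpha3, and 2000099 is 2.0.0.
assert Version.fromId(5000099).equals(Version.fromString("5.0.0")) : "id scheme assumption";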
assertThat(params.get("scroll"), not(either(endsWith("nanos")).or(endsWith("micros")))); if (requested.getStringRep().endsWith("nanos") || requested.getStringRep().endsWith("micros")) { @@ -242,7 +245,7 @@ public void testScrollParams() { public void testScrollEntity() throws IOException { String scroll = randomAlphaOfLength(30); - HttpEntity entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.V_5_0_0).getEntity(); + HttpEntity entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.fromString("5.0.0")).getEntity(); assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); assertThat(Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)), containsString("\"" + scroll + "\"")); @@ -255,7 +258,7 @@ public void testScrollEntity() throws IOException { public void testClearScroll() throws IOException { String scroll = randomAlphaOfLength(30); - Request request = clearScroll(scroll, Version.V_5_0_0); + Request request = clearScroll(scroll, Version.fromString("5.0.0")); assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType().getValue()); assertThat(Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)), containsString("\"" + scroll + "\"")); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index 92f370f8f6364..d3d3cefea45e1 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -150,13 +150,15 @@ public void testLookupRemoteVersion() throws Exception { assertTrue(called.get()); called.set(false); sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, "main/5_0_0_alpha_3.json").lookupRemoteVersion(v -> { - assertEquals(Version.V_5_0_0_alpha3, v); + // V_5_0_0_alpha3 + assertEquals(Version.fromId(5000003), v); called.set(true); }); assertTrue(called.get()); called.set(false); sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, "main/with_unknown_fields.json").lookupRemoteVersion(v -> { - assertEquals(Version.V_5_0_0_alpha3, v); + // V_5_0_0_alpha3 + assertEquals(Version.fromId(5000003), v); called.set(true); }); assertTrue(called.get()); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java index 981a417449f14..73135c2a14560 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java @@ -21,11 +21,11 @@ import io.netty.channel.Channel; import io.netty.channel.ChannelPromise; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.concurrent.CompletableContext; import org.elasticsearch.http.HttpChannel; import org.elasticsearch.http.HttpResponse; -import org.elasticsearch.transport.netty4.Netty4Utils; import java.net.InetSocketAddress; @@ -42,7 +42,7 @@ public class Netty4HttpChannel implements HttpChannel { } else { Throwable cause = f.cause(); if (cause instanceof Error) { - Netty4Utils.maybeDie(cause); + 
ExceptionsHelper.maybeDieOnAnotherThread(cause); closeContext.completeExceptionally(new Exception(cause)); } else { closeContext.completeExceptionally((Exception) cause); @@ -59,7 +59,7 @@ public void sendResponse(HttpResponse response, ActionListener<Void> listener) { listener.onResponse(null); } else { final Throwable cause = f.cause(); - Netty4Utils.maybeDie(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); if (cause instanceof Error) { listener.onFailure(new Exception(cause)); } else { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java index ab078ad10d337..472e34d09fc40 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java @@ -27,7 +27,6 @@ import io.netty.handler.codec.http.FullHttpRequest; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.http.HttpPipelinedRequest; -import org.elasticsearch.transport.netty4.Netty4Utils; @ChannelHandler.Sharable class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<FullHttpRequest>> { @@ -58,7 +57,7 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest listener) listener.onResponse(null); } else { final Throwable cause = f.cause(); - Netty4Utils.maybeDie(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); if (cause instanceof Error) { listener.onFailure(new Exception(cause)); } else { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java index 873a6c33fba11..9ef3f296f0601 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java @@ -20,6 +20,7 @@ package org.elasticsearch.transport.netty4; import io.netty.channel.Channel; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.concurrent.CompletableContext; import org.elasticsearch.transport.TcpServerChannel; @@ -41,7 +42,7 @@ public class Netty4TcpServerChannel implements TcpServerChannel { } else { Throwable cause = f.cause(); if (cause instanceof Error) { - Netty4Utils.maybeDie(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); closeContext.completeExceptionally(new Exception(cause)); } else { closeContext.completeExceptionally((Exception) cause); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 7eb34bcdcd3aa..0edd12a44e8c1 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -38,6 +38,7 @@ import io.netty.util.concurrent.Future; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import
org.elasticsearch.common.SuppressForbidden; @@ -228,7 +229,7 @@ protected Netty4TcpChannel initiateChannel(DiscoveryNode node, ActionListener channels) throws IOEx } } - /** - * If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be - * caught and bubbles up to the uncaught exception handler. - * - * @param cause the throwable to test - */ - public static void maybeDie(final Throwable cause) { - final Logger logger = ESLoggerFactory.getLogger(Netty4Utils.class); - final Optional<Error> maybeError = ExceptionsHelper.maybeError(cause, logger); - if (maybeError.isPresent()) { - /* - * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many - * invocations of user-code in try/catch blocks that swallow all throwables. This means that a rethrow here will not bubble up - * to where we want it to. So, we fork a thread and throw the exception from there where Netty can not get to it. We do not wrap - * the exception so as to not lose the original cause during exit. - */ - try { - // try to log the current stack trace - final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace()); - logger.error("fatal error on the network layer\n{}", formatted); - } finally { - new Thread( - () -> { - throw maybeError.get(); - }) - .start(); - } - } - } - } diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index 1883e3bf1b9d6..676fd44813151 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -1,3 +1,5 @@ +import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask + /* * Licensed to Elasticsearch under one or more contributor * license agreements.
See the NOTICE file distributed with @@ -22,7 +24,7 @@ esplugin { classname 'org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin' } -forbiddenApis { +tasks.withType(ForbiddenApisCliTask) { signatures += [ "com.ibm.icu.text.Collator#getInstance() @ Don't use default locale, use getInstance(ULocale) instead" ] diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java index c4c44222f470e..0235e6e81368f 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java @@ -25,7 +25,6 @@ import com.ibm.icu.util.ULocale; import org.apache.lucene.document.Field; -import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; @@ -35,7 +34,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; @@ -56,7 +54,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.function.BiFunction; import java.util.function.LongSupplier; public class ICUCollationKeywordFieldMapper extends FieldMapper { @@ -571,7 +568,6 @@ public static class TypeParser implements Mapper.TypeParser { private final String variableTop; private final boolean hiraganaQuaternaryMode; private final Collator collator; - private final BiFunction getDVField; protected ICUCollationKeywordFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, String rules, String language, @@ -593,11 +589,6 @@ protected ICUCollationKeywordFieldMapper(String simpleName, MappedFieldType fiel this.variableTop = variableTop; this.hiraganaQuaternaryMode = hiraganaQuaternaryMode; this.collator = collator; - if (indexCreatedVersion.onOrAfter(Version.V_5_6_0)) { - getDVField = SortedSetDocValuesField::new; - } else { - getDVField = SortedDocValuesField::new; - } } @Override @@ -754,7 +745,7 @@ protected void parseCreateField(ParseContext context, List field } if (fieldType().hasDocValues()) { - fields.add(getDVField.apply(fieldType().name(), binaryValue)); + fields.add(new SortedSetDocValuesField(fieldType().name(), binaryValue)); } else if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { createFieldNamesField(context, fields); } diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java index fff255970113d..f39ae886dc45b 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java @@ -28,11 +28,9 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.util.BytesRef; -import 
org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; @@ -106,50 +104,6 @@ public void testDefaults() throws Exception { assertEquals(DocValuesType.SORTED_SET, fieldType.docValuesType()); } - public void testBackCompat() throws Exception { - indexService = createIndex("oldindex", Settings.builder().put("index.version.created", Version.V_5_5_0).build()); - parser = indexService.mapperService().documentMapperParser(); - - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", FIELD_TYPE).endObject().endObject() - .endObject().endObject()); - - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); - - assertEquals(mapping, mapper.mappingSource().toString()); - - ParsedDocument doc = mapper.parse(SourceToParse.source("oldindex", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", "1234") - .endObject()), - XContentType.JSON)); - - IndexableField[] fields = doc.rootDoc().getFields("field"); - assertEquals(2, fields.length); - - Collator collator = Collator.getInstance(ULocale.ROOT); - RawCollationKey key = collator.getRawCollationKey("1234", null); - BytesRef expected = new BytesRef(key.bytes, 0, key.size); - - assertEquals(expected, fields[0].binaryValue()); - IndexableFieldType fieldType = fields[0].fieldType(); - assertThat(fieldType.omitNorms(), equalTo(true)); - assertFalse(fieldType.tokenized()); - assertFalse(fieldType.stored()); - assertThat(fieldType.indexOptions(), equalTo(IndexOptions.DOCS)); - assertThat(fieldType.storeTermVectors(), equalTo(false)); - assertThat(fieldType.storeTermVectorOffsets(), equalTo(false)); - assertThat(fieldType.storeTermVectorPositions(), equalTo(false)); - assertThat(fieldType.storeTermVectorPayloads(), equalTo(false)); - assertEquals(DocValuesType.NONE, fieldType.docValuesType()); - - assertEquals(expected, fields[1].binaryValue()); - fieldType = fields[1].fieldType(); - assertThat(fieldType.indexOptions(), equalTo(IndexOptions.NONE)); - assertEquals(DocValuesType.SORTED, fieldType.docValuesType()); - } - public void testNullValue() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", FIELD_TYPE).endObject().endObject() diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index a6dc27b1f8a1c..50af824fae9bd 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.hash.MurmurHash3; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.index.fielddata.IndexFieldData; @@ -93,10 +92,6 @@ public static class TypeParser implements Mapper.TypeParser { throw new MapperParsingException("Setting [index] cannot be modified for field [" + name + "]"); } - if (parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha2)) { - node.remove("precision_step"); - } - TypeParsers.parseField(builder, name, node, parserContext); return builder; diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java index 04ab7ecd245f6..ac5afeb3a1094 100644 --- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java @@ -82,10 +82,6 @@ public Builder enabled(EnabledAttributeMapper enabled) { @Override public SizeFieldMapper build(BuilderContext context) { setupFieldType(context); - if (context.indexCreatedVersion().onOrBefore(Version.V_5_0_0_alpha4)) { - // Make sure that the doc_values are disabled on indices created before V_5_0_0_alpha4 - fieldType.setHasDocValues(false); - } return new SizeFieldMapper(enabledState, fieldType, context.indexSettings()); } } diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 07ef4b4be5e62..510c101379d2f 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -23,28 +23,38 @@ esplugin { } dependencies { - compile 'com.google.cloud:google-cloud-storage:1.28.0' - compile 'com.google.cloud:google-cloud-core:1.28.0' - compile 'com.google.cloud:google-cloud-core-http:1.28.0' - compile 'com.google.auth:google-auth-library-oauth2-http:0.9.1' - compile 'com.google.auth:google-auth-library-credentials:0.9.1' - compile 'com.google.oauth-client:google-oauth-client:1.23.0' - compile 'com.google.http-client:google-http-client:1.23.0' - compile 'com.google.http-client:google-http-client-jackson:1.23.0' - compile 'com.google.http-client:google-http-client-jackson2:1.23.0' - compile 'com.google.http-client:google-http-client-appengine:1.23.0' - compile 'com.google.api-client:google-api-client:1.23.0' - compile 'com.google.api:gax:1.25.0' - compile 'com.google.api:gax-httpjson:0.40.0' - compile 'com.google.api:api-common:1.5.0' - compile 'com.google.api.grpc:proto-google-common-protos:1.8.0' + compile 'com.google.cloud:google-cloud-storage:1.40.0' + compile 'com.google.cloud:google-cloud-core:1.40.0' compile 'com.google.guava:guava:20.0' - compile 'com.google.apis:google-api-services-storage:v1-rev115-1.23.0' - compile 'org.codehaus.jackson:jackson-core-asl:1.9.13' - compile 'io.grpc:grpc-context:1.9.0' - compile 'io.opencensus:opencensus-api:0.11.1' - compile 'io.opencensus:opencensus-contrib-http-util:0.11.1' - compile 'org.threeten:threetenbp:1.3.6' + compile 'joda-time:joda-time:2.10' + compile 'com.google.http-client:google-http-client:1.24.1' + compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" + compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "commons-codec:commons-codec:${versions.commonscodec}" + compile 'com.google.api:api-common:1.7.0' + compile 'com.google.api:gax:1.30.0' + compile 'org.threeten:threetenbp:1.3.3' + compile 'com.google.protobuf:protobuf-java-util:3.6.0' + compile 'com.google.protobuf:protobuf-java:3.6.0' + compile 
'com.google.code.gson:gson:2.7' + compile 'com.google.api.grpc:proto-google-common-protos:1.12.0' + compile 'com.google.api.grpc:proto-google-iam-v1:0.12.0' + compile 'com.google.cloud:google-cloud-core-http:1.40.0' + compile 'com.google.auth:google-auth-library-credentials:0.10.0' + compile 'com.google.auth:google-auth-library-oauth2-http:0.10.0' + compile 'com.google.oauth-client:google-oauth-client:1.24.1' + compile 'com.google.api-client:google-api-client:1.24.1' + compile 'com.google.http-client:google-http-client-appengine:1.24.1' + compile 'com.google.http-client:google-http-client-jackson:1.24.1' + compile 'org.codehaus.jackson:jackson-core-asl:1.9.11' + compile 'com.google.http-client:google-http-client-jackson2:1.24.1' + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + compile 'com.google.api:gax-httpjson:0.47.0' + compile 'io.opencensus:opencensus-api:0.15.0' + compile 'io.grpc:grpc-context:1.12.0' + compile 'io.opencensus:opencensus-contrib-http-util:0.15.0' + compile 'com.google.apis:google-api-services-storage:v1-rev135-1.24.1' } dependencyLicenses { @@ -52,10 +62,18 @@ dependencyLicenses { mapping from: /google-auth-.*/, to: 'google-auth' mapping from: /google-http-.*/, to: 'google-http' mapping from: /opencensus.*/, to: 'opencensus' + mapping from: /jackson-.*/, to: 'jackson' + mapping from: /http.*/, to: 'httpclient' + mapping from: /protobuf.*/, to: 'protobuf' + mapping from: /proto-google.*/, to: 'proto-google' } thirdPartyAudit.excludes = [ // uses internal java api: sun.misc.Unsafe + 'com.google.protobuf.UnsafeUtil', + 'com.google.protobuf.UnsafeUtil$1', + 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', + 'com.google.protobuf.UnsafeUtil$MemoryAccessor', 'com.google.common.cache.Striped64', 'com.google.common.cache.Striped64$1', 'com.google.common.cache.Striped64$Cell', @@ -87,139 +105,13 @@ thirdPartyAudit.excludes = [ 'com.google.appengine.api.urlfetch.HTTPResponse', 'com.google.appengine.api.urlfetch.URLFetchService', 'com.google.appengine.api.urlfetch.URLFetchServiceFactory', - 'com.google.gson.Gson', - 'com.google.gson.GsonBuilder', - 'com.google.gson.TypeAdapter', - 'com.google.gson.stream.JsonReader', - 'com.google.gson.stream.JsonWriter', - 'com.google.iam.v1.Binding$Builder', - 'com.google.iam.v1.Binding', - 'com.google.iam.v1.Policy$Builder', - 'com.google.iam.v1.Policy', - 'com.google.protobuf.AbstractMessageLite$Builder', - 'com.google.protobuf.AbstractParser', - 'com.google.protobuf.Any$Builder', - 'com.google.protobuf.Any', - 'com.google.protobuf.AnyOrBuilder', - 'com.google.protobuf.AnyProto', - 'com.google.protobuf.Api$Builder', - 'com.google.protobuf.Api', - 'com.google.protobuf.ApiOrBuilder', - 'com.google.protobuf.ApiProto', - 'com.google.protobuf.ByteString', - 'com.google.protobuf.CodedInputStream', - 'com.google.protobuf.CodedOutputStream', - 'com.google.protobuf.DescriptorProtos', - 'com.google.protobuf.Descriptors$Descriptor', - 'com.google.protobuf.Descriptors$EnumDescriptor', - 'com.google.protobuf.Descriptors$EnumValueDescriptor', - 'com.google.protobuf.Descriptors$FieldDescriptor', - 'com.google.protobuf.Descriptors$FileDescriptor$InternalDescriptorAssigner', - 'com.google.protobuf.Descriptors$FileDescriptor', - 'com.google.protobuf.Descriptors$OneofDescriptor', - 'com.google.protobuf.Duration$Builder', - 'com.google.protobuf.Duration', - 'com.google.protobuf.DurationOrBuilder', - 'com.google.protobuf.DurationProto', - 'com.google.protobuf.EmptyProto', - 'com.google.protobuf.Enum$Builder', - 
'com.google.protobuf.Enum', - 'com.google.protobuf.EnumOrBuilder', - 'com.google.protobuf.ExtensionRegistry', - 'com.google.protobuf.ExtensionRegistryLite', - 'com.google.protobuf.FloatValue$Builder', - 'com.google.protobuf.FloatValue', - 'com.google.protobuf.FloatValueOrBuilder', - 'com.google.protobuf.GeneratedMessage$GeneratedExtension', - 'com.google.protobuf.GeneratedMessage', - 'com.google.protobuf.GeneratedMessageV3$Builder', - 'com.google.protobuf.GeneratedMessageV3$BuilderParent', - 'com.google.protobuf.GeneratedMessageV3$FieldAccessorTable', - 'com.google.protobuf.GeneratedMessageV3', - 'com.google.protobuf.Internal$EnumLite', - 'com.google.protobuf.Internal$EnumLiteMap', - 'com.google.protobuf.Internal', - 'com.google.protobuf.InvalidProtocolBufferException', - 'com.google.protobuf.LazyStringArrayList', - 'com.google.protobuf.LazyStringList', - 'com.google.protobuf.MapEntry$Builder', - 'com.google.protobuf.MapEntry', - 'com.google.protobuf.MapField', - 'com.google.protobuf.Message', - 'com.google.protobuf.MessageOrBuilder', - 'com.google.protobuf.Parser', - 'com.google.protobuf.ProtocolMessageEnum', - 'com.google.protobuf.ProtocolStringList', - 'com.google.protobuf.RepeatedFieldBuilderV3', - 'com.google.protobuf.SingleFieldBuilderV3', - 'com.google.protobuf.Struct$Builder', - 'com.google.protobuf.Struct', - 'com.google.protobuf.StructOrBuilder', - 'com.google.protobuf.StructProto', - 'com.google.protobuf.Timestamp$Builder', - 'com.google.protobuf.Timestamp', - 'com.google.protobuf.TimestampProto', - 'com.google.protobuf.Type$Builder', - 'com.google.protobuf.Type', - 'com.google.protobuf.TypeOrBuilder', - 'com.google.protobuf.TypeProto', - 'com.google.protobuf.UInt32Value$Builder', - 'com.google.protobuf.UInt32Value', - 'com.google.protobuf.UInt32ValueOrBuilder', - 'com.google.protobuf.UnknownFieldSet$Builder', - 'com.google.protobuf.UnknownFieldSet', - 'com.google.protobuf.WireFormat$FieldType', - 'com.google.protobuf.WrappersProto', - 'com.google.protobuf.util.Timestamps', - 'org.apache.http.ConnectionReuseStrategy', - 'org.apache.http.Header', - 'org.apache.http.HttpEntity', - 'org.apache.http.HttpEntityEnclosingRequest', - 'org.apache.http.HttpHost', - 'org.apache.http.HttpRequest', - 'org.apache.http.HttpResponse', - 'org.apache.http.HttpVersion', - 'org.apache.http.RequestLine', - 'org.apache.http.StatusLine', - 'org.apache.http.client.AuthenticationHandler', - 'org.apache.http.client.HttpClient', - 'org.apache.http.client.HttpRequestRetryHandler', - 'org.apache.http.client.RedirectHandler', - 'org.apache.http.client.RequestDirector', - 'org.apache.http.client.UserTokenHandler', - 'org.apache.http.client.methods.HttpDelete', - 'org.apache.http.client.methods.HttpEntityEnclosingRequestBase', - 'org.apache.http.client.methods.HttpGet', - 'org.apache.http.client.methods.HttpHead', - 'org.apache.http.client.methods.HttpOptions', - 'org.apache.http.client.methods.HttpPost', - 'org.apache.http.client.methods.HttpPut', - 'org.apache.http.client.methods.HttpRequestBase', - 'org.apache.http.client.methods.HttpTrace', - 'org.apache.http.conn.ClientConnectionManager', - 'org.apache.http.conn.ConnectionKeepAliveStrategy', - 'org.apache.http.conn.params.ConnManagerParams', - 'org.apache.http.conn.params.ConnPerRouteBean', - 'org.apache.http.conn.params.ConnRouteParams', - 'org.apache.http.conn.routing.HttpRoutePlanner', - 'org.apache.http.conn.scheme.PlainSocketFactory', - 'org.apache.http.conn.scheme.Scheme', - 'org.apache.http.conn.scheme.SchemeRegistry', - 
'org.apache.http.conn.ssl.SSLSocketFactory', - 'org.apache.http.conn.ssl.X509HostnameVerifier', - 'org.apache.http.entity.AbstractHttpEntity', - 'org.apache.http.impl.client.DefaultHttpClient', - 'org.apache.http.impl.client.DefaultHttpRequestRetryHandler', - 'org.apache.http.impl.conn.ProxySelectorRoutePlanner', - 'org.apache.http.impl.conn.tsccm.ThreadSafeClientConnManager', - 'org.apache.http.message.BasicHttpResponse', - 'org.apache.http.params.BasicHttpParams', - 'org.apache.http.params.HttpConnectionParams', - 'org.apache.http.params.HttpParams', - 'org.apache.http.params.HttpProtocolParams', - 'org.apache.http.protocol.HttpContext', - 'org.apache.http.protocol.HttpProcessor', - 'org.apache.http.protocol.HttpRequestExecutor' + // commons-logging optional dependencies + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + // commons-logging provided dependencies + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener' ] check { diff --git a/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 b/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 deleted file mode 100644 index 64435356e5eaf..0000000000000 --- a/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e537338d40a57ad469239acb6d828fa544fb52b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/api-common-1.7.0.jar.sha1 b/plugins/repository-gcs/licenses/api-common-1.7.0.jar.sha1 new file mode 100644 index 0000000000000..67291b658e5c5 --- /dev/null +++ b/plugins/repository-gcs/licenses/api-common-1.7.0.jar.sha1 @@ -0,0 +1 @@ +ea59fb8b2450999345035dec8a6f472543391766 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 new file mode 100644 index 0000000000000..3fe8682a1b0f9 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 @@ -0,0 +1 @@ +4b95f4897fa13f2cd904aee711aeafc0c5295cd8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/old/commons-codec-LICENSE.txt b/plugins/repository-gcs/licenses/commons-codec-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/commons-codec-LICENSE.txt rename to plugins/repository-gcs/licenses/commons-codec-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/old/commons-codec-NOTICE.txt b/plugins/repository-gcs/licenses/commons-codec-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/commons-codec-NOTICE.txt rename to plugins/repository-gcs/licenses/commons-codec-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 new file mode 100644 index 0000000000000..5b8f029e58293 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 @@ -0,0 +1 @@ +f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/old/commons-logging-LICENSE.txt b/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/commons-logging-LICENSE.txt rename to plugins/repository-gcs/licenses/commons-logging-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/old/commons-logging-NOTICE.txt b/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt similarity index 100% rename from 
plugins/repository-gcs/licenses/old/commons-logging-NOTICE.txt rename to plugins/repository-gcs/licenses/commons-logging-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 deleted file mode 100644 index 594177047c140..0000000000000 --- a/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -36ab73c0b5d4a67447eb89a3174cc76ced150bd1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-1.30.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-1.30.0.jar.sha1 new file mode 100644 index 0000000000000..d6d2bb20ed840 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-1.30.0.jar.sha1 @@ -0,0 +1 @@ +58fa2feb11b092be0a6ebe705a28736f12374230 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 deleted file mode 100644 index c251ea1dd956c..0000000000000 --- a/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb4bafbfd45b9d24efbb6138a31e37918fac015f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.47.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.47.0.jar.sha1 new file mode 100644 index 0000000000000..fdc722d1520d6 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-0.47.0.jar.sha1 @@ -0,0 +1 @@ +d096f3142eb3adbf877588d1044895d148d9efcb \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-client-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-1.23.0.jar.sha1 deleted file mode 100644 index 0c35d8e08b91f..0000000000000 --- a/plugins/repository-gcs/licenses/google-api-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -522ea860eb48dee71dfe2c61a1fd09663539f556 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-client-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..27dafe58a0182 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-client-1.24.1.jar.sha1 @@ -0,0 +1 @@ +37de23fb9b8b077de4ecec3192d98e752b0e5d72 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-1.23.0.jar.sha1 deleted file mode 100644 index 9f6f77ada3a69..0000000000000 --- a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba4fb6c5dc8d5ad94dedd9927ceee10a31a59abd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev135-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev135-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..e3042ee6ea07e --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev135-1.24.1.jar.sha1 @@ -0,0 +1 @@ +28d3d391dfc7e7e7951760708ad2f48cecacf38f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.10.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.10.0.jar.sha1 new file mode 100644 index 0000000000000..c8258d69326b8 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.10.0.jar.sha1 @@ -0,0 +1 @@ +f981288bd84fe6d140ed70d1d8dbe994a64fa3cc \ No newline at end of file 
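The checksum churn in the surrounding hunks is the mechanical half of the repository-gcs dependency upgrade: the build tracks one <artifact>.jar.sha1 file per dependency, and the dependencyLicenses precommit check configured in the build.gradle hunk above appears to verify each resolved jar against its checked-in digest, which is why every version bump deletes one checksum file and adds another. Below is a minimal sketch of that kind of verification, under the assumption that a plain SHA-1 hex digest is being compared; the class name and example paths are hypothetical, and this is not the actual Gradle task.

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.MessageDigest;

// Illustrative only: recompute a jar's SHA-1 and compare it to the checked-in
// licenses/<name>.jar.sha1 file. Class name and paths are hypothetical.
public class JarSha1Check {

    static String sha1Hex(Path jar) throws Exception {
        byte[] hash = MessageDigest.getInstance("SHA-1").digest(Files.readAllBytes(jar));
        StringBuilder hex = new StringBuilder();
        for (byte b : hash) {
            hex.append(String.format("%02x", b)); // two lowercase hex digits per byte
        }
        return hex.toString();
    }

    public static void main(String[] args) throws Exception {
        Path jar = Paths.get(args[0]);     // e.g. gax-1.30.0.jar
        Path shaFile = Paths.get(args[1]); // e.g. licenses/gax-1.30.0.jar.sha1
        // The checked-in files carry the bare hex digest, without a trailing newline.
        String expected = new String(Files.readAllBytes(shaFile), java.nio.charset.StandardCharsets.UTF_8).trim();
        String actual = sha1Hex(jar);
        if (actual.equals(expected) == false) { // "== false" mirrors the codebase style
            throw new IllegalStateException("SHA-1 mismatch for " + jar + ": got " + actual + ", expected " + expected);
        }
    }
}

Run as, for example, java JarSha1Check gax-1.30.0.jar licenses/gax-1.30.0.jar.sha1; a mismatch signals that the jar and its recorded digest have drifted apart.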
diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 deleted file mode 100644 index 0922a53d2e356..0000000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -25e0f45f3b3d1b4fccc8944845e51a7a4f359652 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.10.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.10.0.jar.sha1 new file mode 100644 index 0000000000000..f55ef7c9c2150 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.10.0.jar.sha1 @@ -0,0 +1 @@ +c079a62086121973a23d90f54e2b8c13050fa39d \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 deleted file mode 100644 index 100a44c187218..0000000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c0fe3a39b0f28d59de1986b3c50f018cd7cb9ec2 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 deleted file mode 100644 index 071533f227839..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c0e88c78ce17c92d76bf46345faf3fa68833b216 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.40.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..7562ead12e9f9 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-1.40.0.jar.sha1 @@ -0,0 +1 @@ +4985701f989030e262cf8f4e38cc954115f5b082 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 deleted file mode 100644 index fed3fc257c32c..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7b4559a9513abd98da50958c56a10f8ae00cb0f7 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.40.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..2761bfdc745c6 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-http-1.40.0.jar.sha1 @@ -0,0 +1 @@ +67f5806beda32894f1e6c9527925b64199fd2e4f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 deleted file mode 100644 index f49152ea05646..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -226019ae816b42c59f1b06999aeeb73722b87200 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.40.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..33e83b73712f7 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-storage-1.40.0.jar.sha1 @@ -0,0 +1 @@ +fabefef46f07d1e334123f0de17702708b4dfbd1 \ No newline at end of file diff 
--git a/plugins/repository-gcs/licenses/google-http-client-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.23.0.jar.sha1 deleted file mode 100644 index 5526275d5a15f..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e86c84ff3c98eca6423e97780325b299133d858 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..46b99f23e470a --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.24.1.jar.sha1 @@ -0,0 +1 @@ +396eac8d3fb1332675f82b208f48a469d64f3b4a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 deleted file mode 100644 index 823c3a85089a5..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0eda0d0f758c1cc525866e52e1226c4eb579d130 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..e39f63fe33ae3 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.24.1.jar.sha1 @@ -0,0 +1 @@ +8535031ae10bf6a196e68f25e10c0d6382699cb6 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 deleted file mode 100644 index 85ba0ab798d05..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a72ea3a197937ef63a893e73df312dac0d813663 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..f6b9694abaa6c --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson-1.24.1.jar.sha1 @@ -0,0 +1 @@ +02c88e77c14effdda76f02a0eac968de74e0bd4e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.23.0.jar.sha1 deleted file mode 100644 index 510856a517f04..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd6761f4046a8cb0455e6fa5f58e12b061e9826e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..634b7d9198c8e --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.24.1.jar.sha1 @@ -0,0 +1 @@ +2ad1dffd8a450055e68d8004fe003033b751d761 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.23.0.jar.sha1 deleted file mode 100644 index 036812b88b5e0..0000000000000 --- a/plugins/repository-gcs/licenses/google-oauth-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e57ea1e2220bda5a2bd24ff17860212861f3c5cf \ No newline at 
end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.24.1.jar.sha1 new file mode 100644 index 0000000000000..2d89939674a51 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-oauth-client-1.24.1.jar.sha1 @@ -0,0 +1 @@ +7b0e0218b96808868c23a7d0b40566a713931d9f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-context-1.12.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-context-1.12.0.jar.sha1 new file mode 100644 index 0000000000000..57f37a81c960f --- /dev/null +++ b/plugins/repository-gcs/licenses/grpc-context-1.12.0.jar.sha1 @@ -0,0 +1 @@ +5b63a170b786051a42cce08118d5ea3c8f60f749 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 deleted file mode 100644 index 02bac0e492074..0000000000000 --- a/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -28b0836f48c9705abf73829bbc536dba29a1329a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gson-2.7.jar.sha1 b/plugins/repository-gcs/licenses/gson-2.7.jar.sha1 new file mode 100644 index 0000000000000..b3433f306eb3f --- /dev/null +++ b/plugins/repository-gcs/licenses/gson-2.7.jar.sha1 @@ -0,0 +1 @@ +751f548c85fa49f330cecbb1875893f971b33c4e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-LICENSE.txt b/plugins/repository-gcs/licenses/gson-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/jackson-core-asl-LICENSE.txt rename to plugins/repository-gcs/licenses/gson-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-NOTICE.txt b/plugins/repository-gcs/licenses/gson-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/jackson-core-asl-NOTICE.txt rename to plugins/repository-gcs/licenses/gson-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 b/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 new file mode 100644 index 0000000000000..6937112a09fb6 --- /dev/null +++ b/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 @@ -0,0 +1 @@ +733db77aa8d9b2d68015189df76ab06304406e50 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/old/httpclient-LICENSE.txt b/plugins/repository-gcs/licenses/httpclient-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/httpclient-LICENSE.txt rename to plugins/repository-gcs/licenses/httpclient-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/old/httpclient-NOTICE.txt b/plugins/repository-gcs/licenses/httpclient-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/old/httpclient-NOTICE.txt rename to plugins/repository-gcs/licenses/httpclient-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 b/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 new file mode 100644 index 0000000000000..581726601745b --- /dev/null +++ b/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 @@ -0,0 +1 @@ +e7501a1b34325abb00d17dde96150604a0658b54 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-LICENSE b/plugins/repository-gcs/licenses/jackson-LICENSE new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/plugins/repository-gcs/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming 
parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/plugins/repository-gcs/licenses/jackson-NOTICE b/plugins/repository-gcs/licenses/jackson-NOTICE new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/plugins/repository-gcs/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-1.9.11.jar.sha1 b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.11.jar.sha1 new file mode 100644 index 0000000000000..ed70030899aa0 --- /dev/null +++ b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.11.jar.sha1 @@ -0,0 +1 @@ +e32303ef8bd18a5c9272780d49b81c95e05ddf43 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 deleted file mode 100644 index c5016bf828d60..0000000000000 --- a/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3c304d70f42f832e0a86d45bd437f692129299a4 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/old/google-LICENSE.txt b/plugins/repository-gcs/licenses/old/google-LICENSE.txt deleted file mode 100644 index 980a15ac24eeb..0000000000000 --- a/plugins/repository-gcs/licenses/old/google-LICENSE.txt +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/old/google-NOTICE.txt b/plugins/repository-gcs/licenses/old/google-NOTICE.txt deleted file mode 100644 index 8d1c8b69c3fce..0000000000000 --- a/plugins/repository-gcs/licenses/old/google-NOTICE.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/plugins/repository-gcs/licenses/old/httpcore-LICENSE.txt b/plugins/repository-gcs/licenses/old/httpcore-LICENSE.txt deleted file mode 100644 index 72819a9f06f2a..0000000000000 --- a/plugins/repository-gcs/licenses/old/httpcore-LICENSE.txt +++ /dev/null @@ -1,241 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - -========================================================================= - -This project contains annotations in the package org.apache.http.annotation -which are derived from JCIP-ANNOTATIONS -Copyright (c) 2005 Brian Goetz and Tim Peierls. -See http://www.jcip.net and the Creative Commons Attribution License -(http://creativecommons.org/licenses/by/2.5) -Full text: http://creativecommons.org/licenses/by/2.5/legalcode - -License - -THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. - -BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS. - -1. Definitions - - "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License. - "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License. - "Licensor" means the individual or entity that offers the Work under the terms of this License. - "Original Author" means the individual or entity who created the Work. - "Work" means the copyrightable work of authorship offered under the terms of this License. - "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. - -2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws. - -3. License Grant. 
Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: - - to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works; - to create and reproduce Derivative Works; - to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works; - to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works. - - For the avoidance of doubt, where the work is a musical composition: - Performance Royalties Under Blanket Licenses. Licensor waives the exclusive right to collect, whether individually or via a performance rights society (e.g. ASCAP, BMI, SESAC), royalties for the public performance or public digital performance (e.g. webcast) of the Work. - Mechanical Rights and Statutory Royalties. Licensor waives the exclusive right to collect, whether individually or via a music rights agency or designated agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions). - Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor waives the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions). - -The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved. - -4. Restrictions.The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: - - You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. 
If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any credit as required by clause 4(b), as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any credit as required by clause 4(b), as requested. - If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor designate another party or parties (e.g. a sponsor institute, publishing entity, journal) for attribution in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit. - -5. Representations, Warranties and Disclaimer - -UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. - -6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -7. Termination - - This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. - Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). 
Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. - -8. Miscellaneous - - Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. - Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. - If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. - No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. - This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You. diff --git a/plugins/repository-gcs/licenses/old/httpcore-NOTICE.txt b/plugins/repository-gcs/licenses/old/httpcore-NOTICE.txt deleted file mode 100644 index c0be50a505ec1..0000000000000 --- a/plugins/repository-gcs/licenses/old/httpcore-NOTICE.txt +++ /dev/null @@ -1,8 +0,0 @@ -Apache HttpComponents Core -Copyright 2005-2014 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - -This project contains annotations derived from JCIP-ANNOTATIONS -Copyright (c) 2005 Brian Goetz and Tim Peierls. 
See http://www.jcip.net diff --git a/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 deleted file mode 100644 index 61d8e3b148144..0000000000000 --- a/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -54689fbf750a7f26e34fa1f1f96b883c53f51486 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-api-0.15.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-api-0.15.0.jar.sha1 new file mode 100644 index 0000000000000..e200e2e24a7df --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-api-0.15.0.jar.sha1 @@ -0,0 +1 @@ +9a098392b287d7924660837f4eba0ce252013683 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 deleted file mode 100644 index c0b04f0f8ccce..0000000000000 --- a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -82e572b41e81ecf58d0d1e9a3953a05aa8f9c84b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.15.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.15.0.jar.sha1 new file mode 100644 index 0000000000000..b642e1ebebd59 --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.15.0.jar.sha1 @@ -0,0 +1 @@ +d88690591669d9b5ba6d91d9eac7736e58ccf3da \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-LICENSE.txt b/plugins/repository-gcs/licenses/proto-google-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/proto-google-common-protos-LICENSE.txt rename to plugins/repository-gcs/licenses/proto-google-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-NOTICE.txt b/plugins/repository-gcs/licenses/proto-google-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/proto-google-common-protos-NOTICE.txt rename to plugins/repository-gcs/licenses/proto-google-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-1.12.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-1.12.0.jar.sha1 new file mode 100644 index 0000000000000..47f3c178a68c6 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-1.12.0.jar.sha1 @@ -0,0 +1 @@ +1140cc74df039deb044ed0e320035e674dc13062 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 deleted file mode 100644 index 0a2dee4447e92..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b3282312ba82536fc9a7778cabfde149a875e877 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 new file mode 100644 index 0000000000000..2bfae3456d499 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 @@ -0,0 +1 @@ +ea312c0250a5d0a7cdd1b20bc2c3259938b79855 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-LICENSE.txt b/plugins/repository-gcs/licenses/protobuf-LICENSE.txt new file mode 100644 index 0000000000000..19b305b00060a 
--- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-LICENSE.txt @@ -0,0 +1,32 @@ +Copyright 2008 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. diff --git a/plugins/repository-gcs/licenses/protobuf-NOTICE.txt b/plugins/repository-gcs/licenses/protobuf-NOTICE.txt new file mode 100644 index 0000000000000..19b305b00060a --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-NOTICE.txt @@ -0,0 +1,32 @@ +Copyright 2008 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.6.0.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.6.0.jar.sha1 new file mode 100644 index 0000000000000..050ebd44c9282 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-3.6.0.jar.sha1 @@ -0,0 +1 @@ +5333f7e422744d76840c08a106e28e519fbe3acd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-util-3.6.0.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-util-3.6.0.jar.sha1 new file mode 100644 index 0000000000000..cc85974499a65 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-util-3.6.0.jar.sha1 @@ -0,0 +1 @@ +3680d0042d4fe0b95ada844ff24da0698a7f0773 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/threetenbp-1.3.3.jar.sha1 b/plugins/repository-gcs/licenses/threetenbp-1.3.3.jar.sha1 new file mode 100644 index 0000000000000..9273043e14520 --- /dev/null +++ b/plugins/repository-gcs/licenses/threetenbp-1.3.3.jar.sha1 @@ -0,0 +1 @@ +3ea31c96676ff12ab56be0b1af6fff61d1a4f1f2 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 b/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 deleted file mode 100644 index 65c16fed4a07b..0000000000000 --- a/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -89dcc04a7e028c3c963413a71f950703cf51f057 \ No newline at end of file diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java index 3dcd59cf8e28c..17a5c1fb97e80 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java @@ -139,7 +139,7 @@ private void handleRequest(Object msg) { if (request.decoderResult().isFailure()) { Throwable cause = request.decoderResult().cause(); if (cause instanceof Error) { - ExceptionsHelper.dieOnError(cause); + ExceptionsHelper.maybeDieOnAnotherThread(cause); transport.incomingRequestError(httpRequest, nioHttpChannel, new Exception(cause)); } else { transport.incomingRequestError(httpRequest, nioHttpChannel, (Exception) cause); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java index 41cb72aa32273..133206e1322d4 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java @@ -73,7 +73,7 @@ public void close() throws Exception { 
        closeFuture.await();
        if (closeFuture.isSuccess() == false) {
            Throwable cause = closeFuture.cause();
-            ExceptionsHelper.dieOnError(cause);
+            ExceptionsHelper.maybeDieOnAnotherThread(cause);
            throw (Exception) cause;
        }
    }
@@ -84,7 +84,7 @@ public void addCloseListener(BiConsumer<Void, Exception> listener) {
            listener.accept(null, null);
        } else {
            final Throwable cause = f.cause();
-            ExceptionsHelper.dieOnError(cause);
+            ExceptionsHelper.maybeDieOnAnotherThread(cause);
            assert cause instanceof Exception;
            listener.accept(null, (Exception) cause);
        }
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java
index 2cdaa4708d15a..637bbafff8eaf 100644
--- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java
+++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java
@@ -223,7 +223,7 @@ public static NettyListener fromBiConsumer(BiConsumer<Void, Exception> biConsume
                biConsumer.accept(null, null);
            } else {
                if (cause instanceof Error) {
-                    ExceptionsHelper.dieOnError(cause);
+                    ExceptionsHelper.maybeDieOnAnotherThread(cause);
                    biConsumer.accept(null, new Exception(cause));
                } else {
                    biConsumer.accept(null, (Exception) cause);
diff --git a/qa/ccs-unavailable-clusters/build.gradle b/qa/ccs-unavailable-clusters/build.gradle
index d9de422bb43e1..c1f2bc9627108 100644
--- a/qa/ccs-unavailable-clusters/build.gradle
+++ b/qa/ccs-unavailable-clusters/build.gradle
@@ -21,5 +21,5 @@ apply plugin: 'elasticsearch.rest-test'
 apply plugin: 'elasticsearch.test-with-dependencies'
 
 dependencies {
-  testCompile project(path: ':client:rest-high-level', configuration: 'shadow')
+  testCompile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}"
 }
diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java
index 992d3ce71f623..9250122025c0a 100644
--- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java
+++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java
@@ -90,14 +90,14 @@ public void testDieWithDignity() throws Exception {
 
         final Iterator<String> it = lines.iterator();
 
-        boolean fatalErrorOnTheNetworkLayer = false;
+        boolean fatalError = false;
         boolean fatalErrorInThreadExiting = false;
 
-        while (it.hasNext() && (fatalErrorOnTheNetworkLayer == false || fatalErrorInThreadExiting == false)) {
+        while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) {
             final String line = it.next();
-            if (line.contains("fatal error on the network layer")) {
-                fatalErrorOnTheNetworkLayer = true;
-            } else if (line.matches(".*\\[ERROR\\]\\[o.e.b.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]"
+            if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.ExceptionsHelper\\s*\\] \\[node-0\\] fatal error")) {
+                fatalError = true;
+            } else if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]"
                 + " fatal error in thread \\[Thread-\\d+\\], exiting$")) {
                 fatalErrorInThreadExiting = true;
                 assertTrue(it.hasNext());
@@ -105,7 +105,7 @@ public void testDieWithDignity() throws Exception {
             }
         }
 
-        assertTrue(fatalErrorOnTheNetworkLayer);
+        assertTrue(fatalError);
         assertTrue(fatalErrorInThreadExiting);
     }
 
diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
index 0b936e44e5beb..d7111f64a1baf 100644
--- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
+++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
@@ -908,9 +908,6 @@ public void testHistoryUUIDIsAdded() throws Exception {
     private void checkSnapshot(String snapshotName, int count, Version tookOnVersion) throws IOException {
         // Check the snapshot metadata, especially the version
         Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName);
-        if (false == (runningAgainstOldCluster && oldClusterVersion.before(Version.V_5_5_0))) {
-            listSnapshotRequest.addParameter("verbose", "true");
-        }
         Map<String, Object> listSnapshotResponse = entityAsMap(client().performRequest(listSnapshotRequest));
         assertEquals(singletonList(snapshotName), XContentMapValues.extractValue("snapshots.snapshot", listSnapshotResponse));
         assertEquals(singletonList("SUCCESS"), XContentMapValues.extractValue("snapshots.state", listSnapshotResponse));
diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle
index 4a0c91469629d..4c3b48cbac946 100644
--- a/qa/vagrant/build.gradle
+++ b/qa/vagrant/build.gradle
@@ -1,5 +1,3 @@
-import org.elasticsearch.gradle.precommit.PrecommitTasks
-
 /*
  * Licensed to Elasticsearch under one or more contributor
  * license agreements. See the NOTICE file distributed with
@@ -69,9 +67,7 @@ esvagrant {
 }
 
 forbiddenApisMain {
-  signaturesURLs = [
-    PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')
-  ]
+  replaceSignatureFiles 'jdk-signatures'
 }
 
 // we don't have additional tests for the tests themselves
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json
index 090c429fd82c0..13281a2a232f4 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json
@@ -33,6 +33,11 @@
        "type" : "number",
        "description" : "A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method, i.e. if date filters are mandatory to match but the shard bounds and the query are disjoint.",
        "default" : 128
+      },
+      "max_concurrent_shard_requests" : {
+        "type" : "number",
+        "description" : "The number of shard requests each sub-search executes concurrently. Use this value to limit the number of concurrent shard requests and thus the impact of a single msearch request on the cluster.",
+        "default" : "The default grows with the number of nodes in the cluster but is at most 256."
      }
    }
  },
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml
index 536e2bfaf9495..fb884ddfca2c8 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml
@@ -61,3 +61,35 @@ setup:
   - match: { responses.3.error.root_cause.0.reason: "/no.such.index/" }
   - match: { responses.3.error.root_cause.0.index: index_3 }
   - match: { responses.4.hits.total: 4 }
+
+---
+"Least impact smoke test":
+# only passing these parameters to make sure they are consumed
+  - do:
+      max_concurrent_shard_requests: 1
+      max_concurrent_searches: 1
+      msearch:
+        body:
+          - index: index_*
+          - query:
+              match: {foo: foo}
+          - index: index_2
+          - query:
+              match_all: {}
+          - index: index_1
+          - query:
+              match: {foo: foo}
+          - index: index_3
+          - query:
+              match_all: {}
+          - type: test
+          - query:
+              match_all: {}
+
+  - match: { responses.0.hits.total: 2 }
+  - match: { responses.1.hits.total: 1 }
+  - match: { responses.2.hits.total: 1 }
+  - match: { responses.3.error.root_cause.0.type: index_not_found_exception }
+  - match: { responses.3.error.root_cause.0.reason: "/no.such.index/" }
+  - match: { responses.3.error.root_cause.0.index: index_3 }
+  - match: { responses.4.hits.total: 4 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml
index 5ecc357e0e167..6ab18146bba68 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/10_basic.yml
@@ -233,3 +233,51 @@
        query:
          match_all: {}
        size: 0
+
+---
+"Scroll max_score is null":
+  - skip:
+      version: " - 6.99.99"
+      reason: max_score was set to 0 rather than null before 7.0
+
+  - do:
+      indices.create:
+        index: test_scroll
+  - do:
+      index:
+        index: test_scroll
+        type: test
+        id: 42
+        body: { foo: 1 }
+
+  - do:
+      index:
+        index: test_scroll
+        type: test
+        id: 43
+        body: { foo: 2 }
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      search:
+        index: test_scroll
+        size: 1
+        scroll: 1m
+        sort: foo
+        body:
+          query:
+            match_all: {}
+
+  - set: {_scroll_id: scroll_id}
+  - length: {hits.hits: 1 }
+  - match: { hits.max_score: null }
+
+  - do:
+      scroll:
+        scroll_id: $scroll_id
+        scroll: 1m
+
+  - length: {hits.hits: 1 }
+  - match: { hits.max_score: null }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml
index 521dc4c1cac8d..dad05cce4eb4a 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml
@@ -244,6 +244,23 @@ setup:
   - match: { hits.total: 6 }
   - length: { hits.hits: 0 }
 
+---
+"no hits and inner_hits max_score null":
+
+  - skip:
+      version: " - 6.99.99"
+      reason: max_score was set to 0 rather than null before 7.0
+
+  - do:
+      search:
+        index: test
+        body:
+          size: 0
+          collapse: { field: numeric_group, inner_hits: { name: sub_hits, size: 1} }
+          sort: [{ sort: desc }]
+
+  - match: { hits.max_score: null }
+
 ---
 "field collapsing and multiple inner_hits":
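For context on the `max_concurrent_shard_requests` parameter registered in `msearch.json` above (and smoke-tested in `msearch/10_basic.yml`), here is a minimal sketch, not part of this patch, of how a caller might pass it to `_msearch` through the low-level REST client; the host, index names, and queries are placeholder assumptions:

```java
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class MsearchThrottleExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // _msearch bodies are newline-delimited JSON: a header line, then a body line, per search.
            String body =
                "{\"index\":\"index_1\"}\n" +
                "{\"query\":{\"match_all\":{}}}\n" +
                "{\"index\":\"index_2\"}\n" +
                "{\"query\":{\"match_all\":{}}}\n";
            Request request = new Request("GET", "/_msearch");
            // Cap the number of shard-level requests each sub-search may have in flight at once.
            request.addParameter("max_concurrent_shard_requests", "1");
            request.setEntity(new NStringEntity(body, ContentType.create("application/x-ndjson")));
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
```

Keeping the throttle a per-request parameter, rather than a cluster-wide setting, lets each caller trade msearch latency against cluster load case by case.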
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml
index dc6b130b28957..c63dee2e211f8 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml
@@ -128,7 +128,6 @@ setup:
   - match: { hits.total: 2 }
   - match: { aggregations.some_agg.doc_count: 3 }
 
-
   - do:
       search:
         pre_filter_shard_size: 1
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml
index dfe0b6825cdc5..62770e2915d23 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml
@@ -39,6 +39,7 @@ setup:
        df: text
 
   - match: {hits.total: 1}
+  - match: {hits.max_score: 1}
   - match: {hits.hits.0._score: 1}
 
   - do:
@@ -52,6 +53,7 @@ setup:
            boost: 2
 
   - match: {hits.total: 1}
+  - match: {hits.max_score: 2}
   - match: {hits.hits.0._score: 2}
 
   - do:
@@ -61,6 +63,7 @@ setup:
        df: text
 
   - match: {hits.total: 1}
+  - match: {hits.max_score: 1}
   - match: {hits.hits.0._score: 1}
 
 ---
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml
index 24920580c4552..4d7ee91bef5f3 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml
@@ -29,6 +29,7 @@
              query_weight: 5
              rescore_query_weight: 10
 
+  - match: {hits.max_score: 15}
   - match: { hits.hits.0._score: 15 }
   - match: { hits.hits.0._explanation.value: 15 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_nested.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_nested.yml
new file mode 100644
index 0000000000000..a10fc7b504bf0
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_nested.yml
@@ -0,0 +1,49 @@
+setup:
+  - do:
+      indices.create:
+        index: testidx
+        body:
+          mappings:
+            _doc:
+              properties:
+                nested1:
+                  type : nested
+                  properties:
+                    nested1-text:
+                      type: text
+                object1:
+                  properties:
+                    object1-text:
+                      type: text
+                    object1-nested1:
+                      type: nested
+                      properties:
+                        object1-nested1-text:
+                          type: text
+  - do:
+      index:
+        index: testidx
+        type: _doc
+        id: 1
+        body:
+          "nested1" : [{ "nested1-text": "text1" }]
+          "object1" : [{ "object1-text": "text2" }, "object1-nested1" : [{"object1-nested1-text" : "text3"}]]
+
+  - do:
+      indices.refresh: {}
+
+---
+"Termvectors on nested fields should return empty results":
+
+  - do:
+      termvectors:
+        index: testidx
+        type: _doc
+        id: 1
+        fields: ["nested1", "nested1.nested1-text", "object1.object1-nested1", "object1.object1-nested1.object1-nested1-text", "object1.object1-text"]
+
+  - is_false: term_vectors.nested1
+  - is_false: term_vectors.nested1\.nested1-text  # escaping as the field name contains dot
+  - is_false: term_vectors.object1\.object1-nested1
+  - is_false: term_vectors.object1\.object1-nested1\.object1-nested1-text
+  - is_true: term_vectors.object1\.object1-text
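The new `termvectors/50_nested.yml` test above pins down that term vectors come back empty for `nested` fields while plain object sub-fields still produce them. A hedged sketch, not part of this patch, of observing the same behavior from Java with the low-level REST client (index, type, and field names mirror the test; the host is a placeholder):

```java
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class NestedTermVectorsExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Request term vectors for a nested field and for a plain object sub-field.
            Request request = new Request("GET", "/testidx/_doc/1/_termvectors");
            request.addParameter("fields", "nested1,object1.object1-text");
            Response response = client.performRequest(request);
            // Expectation per the test above: the "term_vectors" map contains
            // "object1.object1-text" but no entry for the nested field "nested1".
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
```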
@@ -44,7 +44,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -137,17 +136,7 @@ public ElasticsearchException(StreamInput in) throws IOException { super(in.readOptionalString(), in.readException()); readStackTrace(this, in); headers.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString)); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - metadata.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString)); - } else { - for (Iterator>> iterator = headers.entrySet().iterator(); iterator.hasNext(); ) { - Map.Entry> header = iterator.next(); - if (header.getKey().startsWith("es.")) { - metadata.put(header.getKey(), header.getValue()); - iterator.remove(); - } - } - } + metadata.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString)); } /** @@ -287,15 +276,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(this.getMessage()); out.writeException(this.getCause()); writeStackTraces(this, out); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString); - out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString); - } else { - Map> finalHeaders = new HashMap<>(headers.size() + metadata.size()); - finalHeaders.putAll(headers); - finalHeaders.putAll(metadata); - out.writeMapOfLists(finalHeaders, StreamOutput::writeString, StreamOutput::writeString); - } + out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString); + out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString); } public static ElasticsearchException readException(StreamInput input, int id) throws IOException { @@ -1018,11 +1000,11 @@ private enum ElasticsearchExceptionHandle { STATUS_EXCEPTION(org.elasticsearch.ElasticsearchStatusException.class, org.elasticsearch.ElasticsearchStatusException::new, 145, UNKNOWN_VERSION_ADDED), TASK_CANCELLED_EXCEPTION(org.elasticsearch.tasks.TaskCancelledException.class, - org.elasticsearch.tasks.TaskCancelledException::new, 146, Version.V_5_1_1), + org.elasticsearch.tasks.TaskCancelledException::new, 146, UNKNOWN_VERSION_ADDED), SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class, - org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2), + org.elasticsearch.env.ShardLockObtainFailedException::new, 147, UNKNOWN_VERSION_ADDED), UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.UnknownNamedObjectException.class, - org.elasticsearch.common.xcontent.UnknownNamedObjectException::new, 148, Version.V_5_2_0), + org.elasticsearch.common.xcontent.UnknownNamedObjectException::new, 148, UNKNOWN_VERSION_ADDED), TOO_MANY_BUCKETS_EXCEPTION(MultiBucketConsumerService.TooManyBucketsException.class, MultiBucketConsumerService.TooManyBucketsException::new, 149, Version.V_7_0_0_alpha1); diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index 59eb8b60dadba..9f7566662174a 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -136,42 +136,6 @@ public static String formatStackTrace(final StackTraceElement[] stackTrace) { return Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + 
e).collect(Collectors.joining("\n")); } - static final int MAX_ITERATIONS = 1024; - - /** - * Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable. - * - * @param cause the root throwable - * - * @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable - */ - public static Optional maybeError(final Throwable cause, final Logger logger) { - // early terminate if the cause is already an error - if (cause instanceof Error) { - return Optional.of((Error) cause); - } - - final Queue queue = new LinkedList<>(); - queue.add(cause); - int iterations = 0; - while (!queue.isEmpty()) { - iterations++; - if (iterations > MAX_ITERATIONS) { - logger.warn("giving up looking for fatal errors", cause); - break; - } - final Throwable current = queue.remove(); - if (current instanceof Error) { - return Optional.of((Error) current); - } - Collections.addAll(queue, current.getSuppressed()); - if (current.getCause() != null) { - queue.add(current.getCause()); - } - } - return Optional.empty(); - } - /** * Rethrows the first exception in the list and adds all remaining to the suppressed list. * If the given list is empty no exception is thrown @@ -243,20 +207,57 @@ public static boolean reThrowIfNotNull(@Nullable Throwable e) { return true; } + static final int MAX_ITERATIONS = 1024; + + /** + * Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable. + * + * @param cause the root throwable + * @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable + */ + public static Optional maybeError(final Throwable cause, final Logger logger) { + // early terminate if the cause is already an error + if (cause instanceof Error) { + return Optional.of((Error) cause); + } + + final Queue queue = new LinkedList<>(); + queue.add(cause); + int iterations = 0; + while (queue.isEmpty() == false) { + iterations++; + // this is a guard against deeply nested or circular chains of exceptions + if (iterations > MAX_ITERATIONS) { + logger.warn("giving up looking for fatal errors", cause); + break; + } + final Throwable current = queue.remove(); + if (current instanceof Error) { + return Optional.of((Error) current); + } + Collections.addAll(queue, current.getSuppressed()); + if (current.getCause() != null) { + queue.add(current.getCause()); + } + } + return Optional.empty(); + } + /** * If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be - * caught and bubbles up to the uncaught exception handler. + * caught and bubbles up to the uncaught exception handler. Note that the cause tree is examined for any {@link Error}. See + * {@link #maybeError(Throwable, Logger)} for the semantics. * - * @param throwable the throwable to test + * @param throwable the throwable to possibly throw on another thread */ - public static void dieOnError(Throwable throwable) { - final Optional maybeError = ExceptionsHelper.maybeError(throwable, logger); - if (maybeError.isPresent()) { + public static void maybeDieOnAnotherThread(final Throwable throwable) { + ExceptionsHelper.maybeError(throwable, logger).ifPresent(error -> { /* - * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many - * invocations of user-code in try/catch blocks that swallow all throwables. 
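(A condensed, self-contained sketch of the maybeError traversal relocated above: a breadth-first walk over causes and suppressed exceptions, capped so a pathological or circular exception graph cannot spin forever. Names match the diff; the logger is reduced to a comment here.)

import java.util.Collections;
import java.util.LinkedList;
import java.util.Optional;
import java.util.Queue;

final class ErrorUnwrap {
    static final int MAX_ITERATIONS = 1024;

    static Optional<Error> maybeError(final Throwable cause) {
        if (cause instanceof Error) {
            return Optional.of((Error) cause); // early terminate: already an error
        }
        final Queue<Throwable> queue = new LinkedList<>();
        queue.add(cause);
        int iterations = 0;
        while (queue.isEmpty() == false) {
            iterations++;
            if (iterations > MAX_ITERATIONS) {
                break; // guard against deeply nested or circular chains; the real code logs a warning first
            }
            final Throwable current = queue.remove();
            if (current instanceof Error) {
                return Optional.of((Error) current);
            }
            Collections.addAll(queue, current.getSuppressed());
            if (current.getCause() != null) {
                queue.add(current.getCause());
            }
        }
        return Optional.empty();
    }
}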
This means that a rethrow here will not bubble up - * to where we want it to. So, we fork a thread and throw the exception from there where Netty can not get to it. We do not wrap - * the exception so as to not lose the original cause during exit. + * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, sometimes the stack + * contains statements that catch any throwable (e.g., Netty, and the JDK futures framework). This means that a rethrow here + * will not bubble up to where we want it to. So, we fork a thread and throw the exception from there where we are sure the + * stack does not contain statements that catch any throwable. We do not wrap the exception so as to not lose the original cause + * during exit. */ try { // try to log the current stack trace @@ -264,12 +265,12 @@ public static void dieOnError(Throwable throwable) { logger.error("fatal error\n{}", formatted); } finally { new Thread( - () -> { - throw maybeError.get(); - }) - .start(); + () -> { + throw error; + }) + .start(); } - } + }); } /** diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index a815a9711d023..7303e8d34c907 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -43,85 +43,6 @@ public class Version implements Comparable, ToXContentFragment { * values below 25 are for alpha builder (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99 * indicating a release the (internal) format of the id is there so we can easily do after/before checks on the id */ - public static final int V_5_0_0_alpha1_ID = 5000001; - public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); - public static final int V_5_0_0_alpha2_ID = 5000002; - public static final Version V_5_0_0_alpha2 = new Version(V_5_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); - public static final int V_5_0_0_alpha3_ID = 5000003; - public static final Version V_5_0_0_alpha3 = new Version(V_5_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); - public static final int V_5_0_0_alpha4_ID = 5000004; - public static final Version V_5_0_0_alpha4 = new Version(V_5_0_0_alpha4_ID, org.apache.lucene.util.Version.LUCENE_6_1_0); - public static final int V_5_0_0_alpha5_ID = 5000005; - public static final Version V_5_0_0_alpha5 = new Version(V_5_0_0_alpha5_ID, org.apache.lucene.util.Version.LUCENE_6_1_0); - public static final int V_5_0_0_beta1_ID = 5000026; - public static final Version V_5_0_0_beta1 = new Version(V_5_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); - public static final int V_5_0_0_rc1_ID = 5000051; - public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); - public static final int V_5_0_0_ID = 5000099; - public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); - public static final int V_5_0_1_ID = 5000199; - public static final Version V_5_0_1 = new Version(V_5_0_1_ID, org.apache.lucene.util.Version.LUCENE_6_2_1); - public static final int V_5_0_2_ID = 5000299; - public static final Version V_5_0_2 = new Version(V_5_0_2_ID, org.apache.lucene.util.Version.LUCENE_6_2_1); - // no version constant for 5.1.0 due to inadvertent release - public static final int V_5_1_1_ID = 5010199; - public static final Version V_5_1_1 = new 
Version(V_5_1_1_ID, org.apache.lucene.util.Version.LUCENE_6_3_0); - public static final int V_5_1_2_ID = 5010299; - public static final Version V_5_1_2 = new Version(V_5_1_2_ID, org.apache.lucene.util.Version.LUCENE_6_3_0); - public static final int V_5_2_0_ID = 5020099; - public static final Version V_5_2_0 = new Version(V_5_2_0_ID, org.apache.lucene.util.Version.LUCENE_6_4_0); - public static final int V_5_2_1_ID = 5020199; - public static final Version V_5_2_1 = new Version(V_5_2_1_ID, org.apache.lucene.util.Version.LUCENE_6_4_1); - public static final int V_5_2_2_ID = 5020299; - public static final Version V_5_2_2 = new Version(V_5_2_2_ID, org.apache.lucene.util.Version.LUCENE_6_4_1); - public static final int V_5_3_0_ID = 5030099; - public static final Version V_5_3_0 = new Version(V_5_3_0_ID, org.apache.lucene.util.Version.LUCENE_6_4_1); - public static final int V_5_3_1_ID = 5030199; - public static final Version V_5_3_1 = new Version(V_5_3_1_ID, org.apache.lucene.util.Version.LUCENE_6_4_2); - public static final int V_5_3_2_ID = 5030299; - public static final Version V_5_3_2 = new Version(V_5_3_2_ID, org.apache.lucene.util.Version.LUCENE_6_4_2); - public static final int V_5_3_3_ID = 5030399; - public static final Version V_5_3_3 = new Version(V_5_3_3_ID, org.apache.lucene.util.Version.LUCENE_6_4_2); - public static final int V_5_4_0_ID = 5040099; - public static final Version V_5_4_0 = new Version(V_5_4_0_ID, org.apache.lucene.util.Version.LUCENE_6_5_0); - public static final int V_5_4_1_ID = 5040199; - public static final Version V_5_4_1 = new Version(V_5_4_1_ID, org.apache.lucene.util.Version.LUCENE_6_5_1); - public static final int V_5_4_2_ID = 5040299; - public static final Version V_5_4_2 = new Version(V_5_4_2_ID, org.apache.lucene.util.Version.LUCENE_6_5_1); - public static final int V_5_4_3_ID = 5040399; - public static final Version V_5_4_3 = new Version(V_5_4_3_ID, org.apache.lucene.util.Version.LUCENE_6_5_1); - public static final int V_5_5_0_ID = 5050099; - public static final Version V_5_5_0 = new Version(V_5_5_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_5_1_ID = 5050199; - public static final Version V_5_5_1 = new Version(V_5_5_1_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_5_2_ID = 5050299; - public static final Version V_5_5_2 = new Version(V_5_5_2_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_5_3_ID = 5050399; - public static final Version V_5_5_3 = new Version(V_5_5_3_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_6_0_ID = 5060099; - public static final Version V_5_6_0 = new Version(V_5_6_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); - public static final int V_5_6_1_ID = 5060199; - public static final Version V_5_6_1 = new Version(V_5_6_1_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_2_ID = 5060299; - public static final Version V_5_6_2 = new Version(V_5_6_2_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_3_ID = 5060399; - public static final Version V_5_6_3 = new Version(V_5_6_3_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_4_ID = 5060499; - public static final Version V_5_6_4 = new Version(V_5_6_4_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_5_ID = 5060599; - public static final Version V_5_6_5 = new Version(V_5_6_5_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public 
static final int V_5_6_6_ID = 5060699; - public static final Version V_5_6_6 = new Version(V_5_6_6_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_7_ID = 5060799; - public static final Version V_5_6_7 = new Version(V_5_6_7_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_8_ID = 5060899; - public static final Version V_5_6_8 = new Version(V_5_6_8_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_9_ID = 5060999; - public static final Version V_5_6_9 = new Version(V_5_6_9_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_10_ID = 5061099; - public static final Version V_5_6_10 = new Version(V_5_6_10_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); - public static final int V_5_6_11_ID = 5061199; - public static final Version V_5_6_11 = new Version(V_5_6_11_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); @@ -174,10 +95,10 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_3_1 = new Version(V_6_3_1_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_3_2_ID = 6030299; public static final Version V_6_3_2 = new Version(V_6_3_2_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); - public static final int V_6_3_3_ID = 6030399; - public static final Version V_6_3_3 = new Version(V_6_3_3_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_4_0_ID = 6040099; public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); + public static final int V_6_4_1_ID = 6040199; + public static final Version V_6_4_1 = new Version(V_6_4_1_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_5_0_ID = 6050099; public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_7_0_0_alpha1_ID = 7000001; @@ -200,10 +121,10 @@ public static Version fromId(int id) { return V_7_0_0_alpha1; case V_6_5_0_ID: return V_6_5_0; + case V_6_4_1_ID: + return V_6_4_1; case V_6_4_0_ID: return V_6_4_0; - case V_6_3_3_ID: - return V_6_3_3; case V_6_3_2_ID: return V_6_3_2; case V_6_3_1_ID: @@ -246,84 +167,6 @@ public static Version fromId(int id) { return V_6_0_0_alpha2; case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; - case V_5_6_11_ID: - return V_5_6_11; - case V_5_6_10_ID: - return V_5_6_10; - case V_5_6_9_ID: - return V_5_6_9; - case V_5_6_8_ID: - return V_5_6_8; - case V_5_6_7_ID: - return V_5_6_7; - case V_5_6_6_ID: - return V_5_6_6; - case V_5_6_5_ID: - return V_5_6_5; - case V_5_6_4_ID: - return V_5_6_4; - case V_5_6_3_ID: - return V_5_6_3; - case V_5_6_2_ID: - return V_5_6_2; - case V_5_6_1_ID: - return V_5_6_1; - case V_5_6_0_ID: - return V_5_6_0; - case V_5_5_3_ID: - return V_5_5_3; - case V_5_5_2_ID: - return V_5_5_2; - case V_5_5_1_ID: - return V_5_5_1; - case V_5_5_0_ID: - return V_5_5_0; - case V_5_4_3_ID: - return V_5_4_3; - case V_5_4_2_ID: - return V_5_4_2; - case V_5_4_1_ID: - return V_5_4_1; - case V_5_4_0_ID: - return V_5_4_0; - case V_5_3_3_ID: - return V_5_3_3; - case V_5_3_2_ID: - return V_5_3_2; - case V_5_3_1_ID: - return V_5_3_1; - case V_5_3_0_ID: - return V_5_3_0; - case V_5_2_2_ID: - return V_5_2_2; - case V_5_2_1_ID: - return V_5_2_1; - case V_5_2_0_ID: - return V_5_2_0; - 
case V_5_1_2_ID: - return V_5_1_2; - case V_5_1_1_ID: - return V_5_1_1; - case V_5_0_2_ID: - return V_5_0_2; - case V_5_0_1_ID: - return V_5_0_1; - case V_5_0_0_ID: - return V_5_0_0; - case V_5_0_0_rc1_ID: - return V_5_0_0_rc1; - case V_5_0_0_beta1_ID: - return V_5_0_0_beta1; - case V_5_0_0_alpha5_ID: - return V_5_0_0_alpha5; - case V_5_0_0_alpha4_ID: - return V_5_0_0_alpha4; - case V_5_0_0_alpha3_ID: - return V_5_0_0_alpha3; - case V_5_0_0_alpha2_ID: - return V_5_0_0_alpha2; - case V_5_0_0_alpha1_ID: - return V_5_0_0_alpha1; default: return new Version(id, org.apache.lucene.util.Version.LATEST); } @@ -473,8 +316,11 @@ private static class DeclaredVersionsHolder { * is a beta or RC release then the version itself is returned. */ public Version minimumCompatibilityVersion() { - if (major >= 6) { - // all major versions from 6 onwards are compatible with last minor series of the previous major + if (major == 6) { + // force the minimum compatibility for version 6 to 5.6 since we don't reference version 5 anymore + return Version.fromId(5060099); + } else if (major >= 7) { + // all major versions from 7 onwards are compatible with last minor series of the previous major Version bwcVersion = null; for (int i = DeclaredVersionsHolder.DECLARED_VERSIONS.size() - 1; i >= 0; i--) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index 40960c3362086..b6959afba5d89 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.allocation; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.Nullable; @@ -69,7 +68,6 @@ public ClusterAllocationExplainRequest() { public ClusterAllocationExplainRequest(StreamInput in) throws IOException { super(in); - checkVersion(in.getVersion()); this.index = in.readOptionalString(); this.shard = in.readOptionalVInt(); this.primary = in.readOptionalBoolean(); @@ -94,7 +92,6 @@ public ClusterAllocationExplainRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - checkVersion(out.getVersion()); super.writeTo(out); out.writeOptionalString(index); out.writeOptionalVInt(shard); @@ -251,11 +248,4 @@ public static ClusterAllocationExplainRequest parse(XContentParser parser) throw public void readFrom(StreamInput in) throws IOException { throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } - - private void checkVersion(Version version) { - if (version.before(Version.V_5_2_0)) { - throw new IllegalArgumentException("cannot explain shards in a mixed-cluster with pre-" + Version.V_5_2_0 + - " nodes, node version [" + version + "]"); - } - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index 3ae5c2d683a27..4798aeb67c199 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.shards; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -59,10 +58,6 @@ public ClusterSearchShardsRequest(StreamInput in) throws IOException { routing = in.readOptionalString(); preference = in.readOptionalString(); - if (in.getVersion().onOrBefore(Version.V_5_1_1)) { - //types - in.readStringArray(); - } indicesOptions = IndicesOptions.readIndicesOptions(in); } @@ -78,10 +73,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(routing); out.writeOptionalString(preference); - if (out.getVersion().onOrBefore(Version.V_5_1_1)) { - //types - out.writeStringArray(Strings.EMPTY_ARRAY); - } indicesOptions.writeIndicesOptions(out); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java index 28c7903efde81..f8d448d0fe11c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.shards; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; @@ -77,14 +76,12 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < nodes.length; i++) { nodes[i] = new DiscoveryNode(in); } - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - int size = in.readVInt(); - indicesAndFilters = new HashMap<>(); - for (int i = 0; i < size; i++) { - String index = in.readString(); - AliasFilter aliasFilter = new AliasFilter(in); - indicesAndFilters.put(index, aliasFilter); - } + int size = in.readVInt(); + indicesAndFilters = new HashMap<>(); + for (int i = 0; i < size; i++) { + String index = in.readString(); + AliasFilter aliasFilter = new AliasFilter(in); + indicesAndFilters.put(index, aliasFilter); } } @@ -99,12 +96,10 @@ public void writeTo(StreamOutput out) throws IOException { for (DiscoveryNode node : nodes) { node.writeTo(out); } - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeVInt(indicesAndFilters.size()); - for (Map.Entry entry : indicesAndFilters.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } + out.writeVInt(indicesAndFilters.size()); + for (Map.Entry entry : indicesAndFilters.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index b3b24b570eeda..41ae57031d320 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -28,7 +28,6 @@ import java.io.IOException; import static org.elasticsearch.action.ValidateActions.addValidationError; -import static 
org.elasticsearch.snapshots.SnapshotInfo.VERBOSE_INTRODUCED; /** * Get snapshot request @@ -75,9 +74,7 @@ public GetSnapshotsRequest(StreamInput in) throws IOException { repository = in.readString(); snapshots = in.readStringArray(); ignoreUnavailable = in.readBoolean(); - if (in.getVersion().onOrAfter(VERBOSE_INTRODUCED)) { - verbose = in.readBoolean(); - } + verbose = in.readBoolean(); } @Override @@ -86,9 +83,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(repository); out.writeStringArray(snapshots); out.writeBoolean(ignoreUnavailable); - if (out.getVersion().onOrAfter(VERBOSE_INTRODUCED)) { - out.writeBoolean(verbose); - } + out.writeBoolean(verbose); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index 6f702cbbe7c0a..d02d6272c9514 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -121,11 +121,7 @@ public void readFrom(StreamInput in) throws IOException { } id = in.readOptionalString(); content = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - xContentType = in.readEnum(XContentType.class); - } else { - xContentType = XContentHelper.xContentType(content); - } + xContentType = in.readEnum(XContentType.class); if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) { context = in.readOptionalString(); source = new StoredScriptSource(in); @@ -143,9 +139,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalString(id); out.writeBytesReference(content); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) { out.writeOptionalString(context); source.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java index d45ab2682a5ec..e571db951cbc1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.action.admin.indices.analyze; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -191,15 +190,10 @@ public void readFrom(StreamInput in) throws IOException { startOffset = in.readInt(); endOffset = in.readInt(); position = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - Integer len = in.readOptionalVInt(); - if (len != null) { - positionLength = len; - } else { - positionLength = 1; - } - } - else { + Integer len = in.readOptionalVInt(); + if (len != null) { + positionLength = len; + } else { positionLength = 1; } type = in.readOptionalString(); @@ -212,9 +206,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(startOffset); out.writeInt(endOffset); out.writeVInt(position); - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeOptionalVInt(positionLength > 1 ? positionLength : null); - } + out.writeOptionalVInt(positionLength > 1 ? 
positionLength : null); out.writeOptionalString(type); out.writeMapWithConsistentOrder(attributes); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java index c858d0bb10651..79192693620dd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.create; -import org.elasticsearch.Version; import org.elasticsearch.action.support.master.ShardsAcknowledgedResponse; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -66,18 +65,14 @@ protected CreateIndexResponse(boolean acknowledged, boolean shardsAcknowledged, public void readFrom(StreamInput in) throws IOException { super.readFrom(in); readShardsAcknowledged(in); - if (in.getVersion().onOrAfter(Version.V_5_6_0)) { - index = in.readString(); - } + index = in.readString(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeShardsAcknowledged(out); - if (out.getVersion().onOrAfter(Version.V_5_6_0)) { - out.writeString(index); - } + out.writeString(index); } public String index() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 1556ee2341d27..a827444acb8c5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -297,10 +297,6 @@ public void readFrom(StreamInput in) throws IOException { indicesOptions = IndicesOptions.readIndicesOptions(in); type = in.readOptionalString(); source = in.readString(); - if (in.getVersion().before(Version.V_5_3_0)) { - // we do not know the format from earlier versions so convert if necessary - source = XContentHelper.convertToJson(new BytesArray(source), false, false, XContentFactory.xContentType(source)); - } if (in.getVersion().before(Version.V_7_0_0_alpha1)) { in.readBoolean(); // updateAllTypes } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index d194b9acd1b7f..f9431a3ad02b0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -492,11 +492,6 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < size; i++) { final String type = in.readString(); String mappingSource = in.readString(); - if (in.getVersion().before(Version.V_5_3_0)) { - // we do not know the incoming type so convert it if needed - mappingSource = - XContentHelper.convertToJson(new BytesArray(mappingSource), false, false, XContentFactory.xContentType(mappingSource)); - } mappings.put(type, mappingSource); } int customSize = in.readVInt(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java index d0a62fe771d1f..b60bc407ce70c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java @@ -120,11 +120,7 @@ public void readFrom(StreamInput in) throws IOException { } else { index = in.readString(); } - if (in.getVersion().onOrAfter(Version.V_5_4_0)) { - shard = in.readInt(); - } else { - shard = RANDOM_SHARD; - } + shard = in.readInt(); valid = in.readBoolean(); explanation = in.readOptionalString(); error = in.readOptionalString(); @@ -137,9 +133,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeString(index); } - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeInt(shard); - } + out.writeInt(shard); out.writeBoolean(valid); out.writeOptionalString(explanation); out.writeOptionalString(error); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java index 7694e7583c898..a30c9ba846107 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.validate.query; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.IndicesOptions; @@ -156,9 +155,7 @@ public void readFrom(StreamInput in) throws IOException { } explain = in.readBoolean(); rewrite = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_5_4_0)) { - allShards = in.readBoolean(); - } + allShards = in.readBoolean(); } @Override @@ -171,9 +168,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeBoolean(explain); out.writeBoolean(rewrite); - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeBoolean(allShards); - } + out.writeBoolean(allShards); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index fb535d312cf65..9b9be3a41476e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -244,8 +244,8 @@ public void writeTo(StreamOutput out) throws IOException { } private static boolean supportsAbortedFlag(Version version) { - // The "aborted" flag was added for 5.5.3 and 5.6.0, but was not in 6.0.0-beta2 - return version.after(Version.V_6_0_0_beta2) || (version.major == 5 && version.onOrAfter(Version.V_5_5_3)); + // The "aborted" flag was not in 6.0.0-beta2 + return version.after(Version.V_6_0_0_beta2); } /** @@ -447,11 +447,7 @@ public static BulkItemResponse readBulkItem(StreamInput in) throws IOException { @Override public void readFrom(StreamInput in) throws IOException { id = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - opType = OpType.fromId(in.readByte()); - } else { - opType = OpType.fromString(in.readString()); - } + opType = OpType.fromId(in.readByte()); byte type = in.readByte(); if (type == 0) { @@ -474,11 +470,7 @@ 
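(The same mechanical simplification recurs in every hunk around here: a wire field guarded by an onOrAfter version check collapses to an unconditional read/write once no supported peer predates the gate. A sketch of the idiom follows; GatedExample and GATE are placeholder names, not upstream classes.)

import java.io.IOException;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

class GatedExample {
    // Placeholder gate; in the hunks above the gates were 5.x constants that
    // become meaningless once 6.x only talks to 5.6+ peers.
    private static final Version GATE = Version.V_6_0_0_alpha1;
    private boolean flag;

    void readFrom(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(GATE)) { // before the cleanup: gated
            flag = in.readBoolean();
        } else {
            flag = false; // default assumed for peers that never sent the field
        }
        // after the cleanup the whole body is just: flag = in.readBoolean();
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(GATE)) { // before the cleanup: gated
            out.writeBoolean(flag);
        }
        // after the cleanup: out.writeBoolean(flag); unconditionally
    }
}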
public void readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeByte(opType.getId()); - } else { - out.writeString(opType.getLowercase()); - } + out.writeByte(opType.getId()); if (response == null) { out.writeByte((byte) 2); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index e3e94e8233944..ea4a5086d7b98 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -521,7 +521,7 @@ private long relativeTime() { void processBulkIndexIngestRequest(Task task, BulkRequest original, ActionListener listener) { long ingestStartTimeInNanos = System.nanoTime(); BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); - ingestService.getPipelineExecutionService().executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> { + ingestService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> { logger.debug(() -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception); bulkRequestModifier.markCurrentItemAsFailed(exception); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index 636af6101ae0e..22d231d3711be 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -81,24 +80,18 @@ void setMergeResults(boolean mergeResults) { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); fields = in.readStringArray(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - mergeResults = in.readBoolean(); - } else { - mergeResults = true; - } + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + mergeResults = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(fields); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeStringArray(indices); - indicesOptions.writeIndicesOptions(out); - out.writeBoolean(mergeResults); - } + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + out.writeBoolean(mergeResults); } /** diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java index 959b4e572b714..178639bd4348f 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -19,7 +19,6 @@ package 
org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.collect.Tuple; @@ -95,11 +94,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); this.responseMap = in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - indexResponses = in.readList(FieldCapabilitiesIndexResponse::new); - } else { - indexResponses = Collections.emptyList(); - } + indexResponses = in.readList(FieldCapabilitiesIndexResponse::new); } private static Map readField(StreamInput in) throws IOException { @@ -110,10 +105,7 @@ private static Map readField(StreamInput in) throws I public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeList(indexResponses); - } - + out.writeList(indexResponses); } private static void writeField(StreamOutput out, diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java index d3cd052ecad1e..6b4c74fe56cd4 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java @@ -27,26 +27,23 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.ingest.PipelineStore; -import org.elasticsearch.node.NodeService; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; public class DeletePipelineTransportAction extends TransportMasterNodeAction { - private final PipelineStore pipelineStore; - private final ClusterService clusterService; + private final IngestService ingestService; @Inject - public DeletePipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, + public DeletePipelineTransportAction(Settings settings, ThreadPool threadPool, IngestService ingestService, TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) { - super(settings, DeletePipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeletePipelineRequest::new); - this.clusterService = clusterService; - this.pipelineStore = nodeService.getIngestService().getPipelineStore(); + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, DeletePipelineAction.NAME, transportService, ingestService.getClusterService(), + threadPool, actionFilters, indexNameExpressionResolver, DeletePipelineRequest::new); + this.ingestService = ingestService; } @Override @@ -60,8 +57,9 @@ protected AcknowledgedResponse newResponse() { } @Override - protected void masterOperation(DeletePipelineRequest request, ClusterState state, ActionListener listener) throws Exception { - 
pipelineStore.delete(clusterService, request, listener); + protected void masterOperation(DeletePipelineRequest request, ClusterState state, + ActionListener listener) throws Exception { + ingestService.delete(request, listener); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java index 191ed87a42cde..540f46982a569 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java @@ -29,21 +29,17 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.ingest.PipelineStore; -import org.elasticsearch.node.NodeService; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; public class GetPipelineTransportAction extends TransportMasterNodeReadAction { - - private final PipelineStore pipelineStore; - + @Inject public GetPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) { + IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, GetPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, GetPipelineRequest::new, indexNameExpressionResolver); - this.pipelineStore = nodeService.getIngestService().getPipelineStore(); } @Override @@ -58,7 +54,7 @@ protected GetPipelineResponse newResponse() { @Override protected void masterOperation(GetPipelineRequest request, ClusterState state, ActionListener listener) throws Exception { - listener.onResponse(new GetPipelineResponse(pipelineStore.getPipelines(state, request.getIds()))); + listener.onResponse(new GetPipelineResponse(IngestService.getPipelines(state, request.getIds()))); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java index 6447b0557db0c..abff28bcf553c 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.bytes.BytesReference; @@ -82,11 +81,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); id = in.readString(); source = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - xContentType = in.readEnum(XContentType.class); - } else { - xContentType = XContentHelper.xContentType(source); - } + xContentType = in.readEnum(XContentType.class); } @Override @@ -94,9 +89,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); out.writeBytesReference(source); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); } @Override diff --git 
a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index abe8f49272c77..38e1f2fb54b5b 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -32,12 +32,10 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.ingest.PipelineStore; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.IngestInfo; -import org.elasticsearch.node.NodeService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -46,19 +44,19 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction { - private final PipelineStore pipelineStore; - private final ClusterService clusterService; + private final IngestService ingestService; private final NodeClient client; @Inject - public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService, - NodeClient client) { - super(settings, PutPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new); - this.clusterService = clusterService; + public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + IngestService ingestService, NodeClient client) { + super( + settings, PutPipelineAction.NAME, transportService, ingestService.getClusterService(), + threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new + ); this.client = client; - this.pipelineStore = nodeService.getIngestService().getPipelineStore(); + this.ingestService = ingestService; } @Override @@ -84,7 +82,7 @@ public void onResponse(NodesInfoResponse nodeInfos) { for (NodeInfo nodeInfo : nodeInfos.getNodes()) { ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest()); } - pipelineStore.put(clusterService, ingestInfos, request, listener); + ingestService.putPipeline(ingestInfos, request, listener); } catch (Exception e) { onFailure(e); } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java index 9a7d6bb7feea9..fecee5f265fe9 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.bytes.BytesReference; @@ -32,8 +31,8 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; 
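(Condensed from the PutPipelineTransportAction hunk just above: the master first collects per-node ingest info so processors can be validated on every node, then hands everything to IngestService. Only the inner loop and the putPipeline call are taken from the hunk; the surrounding nodes-info plumbing is reconstructed here and may differ from the actual class.)

import java.util.HashMap;
import java.util.Map;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.ingest.IngestInfo;
import org.elasticsearch.ingest.IngestService;

class PutPipelineSketch {
    void putPipeline(NodeClient client, IngestService ingestService,
                     PutPipelineRequest request, ActionListener<AcknowledgedResponse> listener) {
        NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
        nodesInfoRequest.clear();
        nodesInfoRequest.ingest(true); // only ingest info is needed for validation
        client.admin().cluster().nodesInfo(nodesInfoRequest, ActionListener.wrap(nodeInfos -> {
            Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>();
            for (NodeInfo nodeInfo : nodeInfos.getNodes()) {
                ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest());
            }
            // was: pipelineStore.put(clusterService, ingestInfos, request, listener)
            ingestService.putPipeline(ingestInfos, request, listener);
        }, listener::onFailure));
    }
}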
+import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.Pipeline; -import org.elasticsearch.ingest.PipelineStore; import java.io.IOException; import java.util.ArrayList; @@ -76,11 +75,7 @@ public SimulatePipelineRequest(BytesReference source, XContentType xContentType) id = in.readOptionalString(); verbose = in.readBoolean(); source = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - xContentType = in.readEnum(XContentType.class); - } else { - xContentType = XContentHelper.xContentType(source); - } + xContentType = in.readEnum(XContentType.class); } @Override @@ -123,9 +118,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(id); out.writeBoolean(verbose); out.writeBytesReference(source); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); } @Override @@ -164,14 +157,13 @@ public boolean isVerbose() { } } - private static final Pipeline.Factory PIPELINE_FACTORY = new Pipeline.Factory(); static final String SIMULATED_PIPELINE_ID = "_simulate_pipeline"; - static Parsed parseWithPipelineId(String pipelineId, Map config, boolean verbose, PipelineStore pipelineStore) { + static Parsed parseWithPipelineId(String pipelineId, Map config, boolean verbose, IngestService ingestService) { if (pipelineId == null) { throw new IllegalArgumentException("param [pipeline] is null"); } - Pipeline pipeline = pipelineStore.get(pipelineId); + Pipeline pipeline = ingestService.getPipeline(pipelineId); if (pipeline == null) { throw new IllegalArgumentException("pipeline [" + pipelineId + "] does not exist"); } @@ -179,9 +171,9 @@ static Parsed parseWithPipelineId(String pipelineId, Map config, return new Parsed(pipeline, ingestDocumentList, verbose); } - static Parsed parse(Map config, boolean verbose, PipelineStore pipelineStore) throws Exception { + static Parsed parse(Map config, boolean verbose, IngestService pipelineStore) throws Exception { Map pipelineConfig = ConfigurationUtils.readMap(null, null, config, Fields.PIPELINE); - Pipeline pipeline = PIPELINE_FACTORY.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactories()); + Pipeline pipeline = Pipeline.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactories()); List ingestDocumentList = parseDocs(config); return new Parsed(pipeline, ingestDocumentList, verbose); } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java index 2e898c1895f9a..ad8577d5244de 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java @@ -26,8 +26,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.ingest.PipelineStore; -import org.elasticsearch.node.NodeService; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -36,15 +35,15 @@ public class SimulatePipelineTransportAction extends HandledTransportAction { - private final PipelineStore pipelineStore; + private final IngestService ingestService; private final 
SimulateExecutionService executionService; @Inject public SimulatePipelineTransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, - ActionFilters actionFilters, NodeService nodeService) { + ActionFilters actionFilters, IngestService ingestService) { super(settings, SimulatePipelineAction.NAME, transportService, actionFilters, (Writeable.Reader) SimulatePipelineRequest::new); - this.pipelineStore = nodeService.getIngestService().getPipelineStore(); + this.ingestService = ingestService; this.executionService = new SimulateExecutionService(threadPool); } @@ -55,9 +54,9 @@ protected void doExecute(Task task, SimulatePipelineRequest request, ActionListe final SimulatePipelineRequest.Parsed simulateRequest; try { if (request.getId() != null) { - simulateRequest = SimulatePipelineRequest.parseWithPipelineId(request.getId(), source, request.isVerbose(), pipelineStore); + simulateRequest = SimulatePipelineRequest.parseWithPipelineId(request.getId(), source, request.isVerbose(), ingestService); } else { - simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), pipelineStore); + simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), ingestService); } } catch (Exception e) { listener.onFailure(e); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index e67517c4852b8..e560e53ed7b64 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -135,10 +135,8 @@ public SearchRequest(StreamInput in) throws IOException { indicesOptions = IndicesOptions.readIndicesOptions(in); requestCache = in.readOptionalBoolean(); batchedReduceSize = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_5_6_0)) { - maxConcurrentShardRequests = in.readVInt(); - preFilterShardSize = in.readVInt(); - } + maxConcurrentShardRequests = in.readVInt(); + preFilterShardSize = in.readVInt(); if (in.getVersion().onOrAfter(Version.V_6_3_0)) { allowPartialSearchResults = in.readOptionalBoolean(); } @@ -160,10 +158,8 @@ public void writeTo(StreamOutput out) throws IOException { indicesOptions.writeIndicesOptions(out); out.writeOptionalBoolean(requestCache); out.writeVInt(batchedReduceSize); - if (out.getVersion().onOrAfter(Version.V_5_6_0)) { - out.writeVInt(maxConcurrentShardRequests); - out.writeVInt(preFilterShardSize); - } + out.writeVInt(maxConcurrentShardRequests); + out.writeVInt(preFilterShardSize); if (out.getVersion().onOrAfter(Version.V_6_3_0)) { out.writeOptionalBoolean(allowPartialSearchResults); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 2a97798764e59..0273d5e58219a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -374,9 +374,7 @@ public void readFrom(StreamInput in) throws IOException { } scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_5_6_0)) { - skippedShards = in.readVInt(); - } + skippedShards = in.readVInt(); } @Override @@ -395,9 +393,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); - if(out.getVersion().onOrAfter(Version.V_5_6_0)) { - 
out.writeVInt(skippedShards); - } + out.writeVInt(skippedShards); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 133d0291df597..a4ea2616e0a21 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; @@ -113,17 +112,8 @@ public void sendFreeContext(Transport.Connection connection, long contextId, fin public void sendCanMatch(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, final ActionListener listener) { - if (connection.getNode().getVersion().onOrAfter(Version.V_5_6_0)) { - transportService.sendChildRequest(connection, QUERY_CAN_MATCH_NAME, request, task, - TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, CanMatchResponse::new)); - } else { - // this might look weird but if we are in a CrossClusterSearch environment we can get a connection - // to a pre 5.latest node which is proxied by a 5.latest node under the hood since we are only compatible with 5.latest - // instead of sending the request we shortcut it here and let the caller deal with this -- see #25704 - // also failing the request instead of returning a fake answer might trigger a retry on a replica which might be on a - // compatible node - throw new IllegalArgumentException("can_match is not supported on pre 5.6 nodes"); - } + transportService.sendChildRequest(connection, QUERY_CAN_MATCH_NAME, request, task, + TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, CanMatchResponse::new)); } public void sendClearAllScrollContexts(Transport.Connection connection, final ActionListener listener) { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index f416627c1e088..d6bf911e572c3 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -498,14 +498,10 @@ public void readFrom(StreamInput in) throws IOException { if (in.readBoolean()) { doc = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - xContentType = in.readEnum(XContentType.class); - } else { - xContentType = XContentHelper.xContentType(doc); - } + xContentType = in.readEnum(XContentType.class); } routing = in.readOptionalString(); - + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { in.readOptionalString(); // _parent } @@ -546,9 +542,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(doc != null); if (doc != null) { out.writeBytesReference(doc); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); } out.writeOptionalString(routing); if (out.getVersion().before(Version.V_7_0_0_alpha1)) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index 1a028042db29b..c5a8e806f41a4 100644 --- 
a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.index.IndexModule; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.process.ProcessProbe; import org.elasticsearch.node.NodeValidationException; @@ -393,17 +394,22 @@ long getMaxFileSize() { static class MaxMapCountCheck implements BootstrapCheck { - private static final long LIMIT = 1 << 18; + static final long LIMIT = 1 << 18; @Override - public BootstrapCheckResult check(BootstrapContext context) { - if (getMaxMapCount() != -1 && getMaxMapCount() < LIMIT) { - final String message = String.format( - Locale.ROOT, - "max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]", - getMaxMapCount(), - LIMIT); - return BootstrapCheckResult.failure(message); + public BootstrapCheckResult check(final BootstrapContext context) { + // we only enforce the check if mmapfs is an allowed store type + if (IndexModule.NODE_STORE_ALLOW_MMAPFS.get(context.settings)) { + if (getMaxMapCount() != -1 && getMaxMapCount() < LIMIT) { + final String message = String.format( + Locale.ROOT, + "max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]", + getMaxMapCount(), + LIMIT); + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } } else { return BootstrapCheckResult.success(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java index 234d1ef9f17fd..0134b798c72fd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java @@ -40,8 +40,6 @@ public class SnapshotDeletionsInProgress extends AbstractNamedDiffable implements Custom { public static final String TYPE = "snapshot_deletions"; - // the version where SnapshotDeletionsInProgress was introduced - public static final Version VERSION_INTRODUCED = Version.V_5_2_0; // the list of snapshot deletion request entries private final List entries; @@ -135,7 +133,7 @@ public static NamedDiff readDiffFrom(StreamInput in) throws IOException @Override public Version getMinimalSupportedVersion() { - return VERSION_INTRODUCED; + return Version.CURRENT.minimumCompatibilityVersion(); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 87563c968af17..565c5134d1b38 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -48,12 +48,6 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implements Custom { public static final String TYPE = "snapshots"; - // denotes an undefined repository state id, which will happen when receiving a cluster state with - // a snapshot in progress from a pre 5.2.x node - public static final long UNDEFINED_REPOSITORY_STATE_ID = -2L; - // the version where repository state ids were introduced - private static final Version REPOSITORY_ID_INTRODUCED_VERSION = Version.V_5_2_0; - 
@Override public boolean equals(Object o) { if (this == o) return true; @@ -432,10 +426,7 @@ public SnapshotsInProgress(StreamInput in) throws IOException { builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState, reason)); } } - long repositoryStateId = UNDEFINED_REPOSITORY_STATE_ID; - if (in.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { - repositoryStateId = in.readLong(); - } + long repositoryStateId = in.readLong(); entries[i] = new Entry(snapshot, includeGlobalState, partial, @@ -471,9 +462,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(shardEntry.value.state().value()); } } - if (out.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { - out.writeLong(entry.repositoryStateId); - } + out.writeLong(entry.repositoryStateId); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java index efbd262b16dda..fc09741f4d9c2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.block; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -138,11 +137,7 @@ public void readFrom(StreamInput in) throws IOException { retryable = in.readBoolean(); disableStatePersistence = in.readBoolean(); status = RestStatus.readFrom(in); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - allowReleaseResources = in.readBoolean(); - } else { - allowReleaseResources = false; - } + allowReleaseResources = in.readBoolean(); } @Override @@ -156,9 +151,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(retryable); out.writeBoolean(disableStatePersistence); RestStatus.writeTo(out, status); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeBoolean(allowReleaseResources); - } + out.writeBoolean(allowReleaseResources); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java index 153fc2cbe3e7d..8b97f1357fa00 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.Nullable; @@ -82,11 +81,7 @@ public NodeAllocationResult(DiscoveryNode node, Decision decision, int weightRan public NodeAllocationResult(StreamInput in) throws IOException { node = new DiscoveryNode(in); shardStoreInfo = in.readOptionalWriteable(ShardStoreInfo::new); - if (in.getVersion().before(Version.V_5_2_1)) { - canAllocateDecision = Decision.readFrom(in); - } else { - canAllocateDecision = in.readOptionalWriteable(Decision::readFrom); - } + canAllocateDecision = in.readOptionalWriteable(Decision::readFrom); nodeDecision = AllocationDecision.readFrom(in); weightRanking = in.readVInt(); } @@ -95,15 +90,7 @@ public NodeAllocationResult(StreamInput in) 
throws IOException { public void writeTo(StreamOutput out) throws IOException { node.writeTo(out); out.writeOptionalWriteable(shardStoreInfo); - if (out.getVersion().before(Version.V_5_2_1)) { - if (canAllocateDecision == null) { - Decision.NO.writeTo(out); - } else { - canAllocateDecision.writeTo(out); - } - } else { - out.writeOptionalWriteable(canAllocateDecision); - } + out.writeOptionalWriteable(canAllocateDecision); nodeDecision.writeTo(out); out.writeVInt(weightRanking); } diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java index 0ee8d095f49a6..bf65162d215b9 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java @@ -25,6 +25,8 @@ import java.util.ArrayList; import java.util.Collection; +import static org.apache.lucene.geo.GeoUtils.MAX_LAT_INCL; + /** * Utilities for converting to/from the GeoHash standard * @@ -48,6 +50,8 @@ public class GeoHashUtils { private static final double LAT_SCALE = (0x1L<>>= 4; - // deinterleave and add 1 to lat and lon to get topRight - long lat = BitUtil.deinterleave(ghLong >>> 1) + 1; - long lon = BitUtil.deinterleave(ghLong) + 1; - GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)lon, (int)lat) << 4 | len); - - return new Rectangle(bottomLeft.lat(), topRight.lat(), bottomLeft.lon(), topRight.lon()); + // deinterleave + long lon = BitUtil.deinterleave(ghLong >>> 1); + long lat = BitUtil.deinterleave(ghLong); + if (lat < MAX_LAT_BITS) { + // add 1 to lat and lon to get topRight + GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)(lat + 1), (int)(lon + 1)) << 4 | len); + return new Rectangle(bottomLeft.lat(), topRight.lat(), bottomLeft.lon(), topRight.lon()); + } else { + // We cannot go north of north pole, so just using 90 degrees instead of calculating it using + // add 1 to lon to get lon of topRight, we are going to use 90 for lat + GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)lat, (int)(lon + 1)) << 4 | len); + return new Rectangle(bottomLeft.lat(), MAX_LAT_INCL, bottomLeft.lon(), topRight.lon()); + } } /** diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 90420369513ae..276129eabb60d 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -105,7 +105,7 @@ public class Lucene { public static final ScoreDoc[] EMPTY_SCORE_DOCS = new ScoreDoc[0]; - public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, 0.0f); + public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, Float.NaN); public static Version parseVersion(@Nullable String version, Version defaultVersion, Logger logger) { if (version == null) { diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 8847c8138a706..9f1d7d8a39530 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -199,7 +199,7 @@ public synchronized void addSettingsUpdateConsumer(Setting setting, Consu * Also automatically adds empty consumers for all settings in order to activate 
logging */ public synchronized void addSettingsUpdateConsumer(Consumer consumer, List> settings) { - addSettingsUpdater(Setting.groupedSettingsUpdater(consumer, logger, settings)); + addSettingsUpdater(Setting.groupedSettingsUpdater(consumer, settings)); } /** @@ -208,11 +208,78 @@ public synchronized void addSettingsUpdateConsumer(Consumer consumer, */ public synchronized void addAffixUpdateConsumer(Setting.AffixSetting setting, BiConsumer consumer, BiConsumer validator) { + ensureSettingIsRegistered(setting); + addSettingsUpdater(setting.newAffixUpdater(consumer, logger, validator)); + } + + /** + * Adds a affix settings consumer that accepts the values for two settings. The consumer is only notified if one or both settings change + * and if the provided validator succeeded. + *
<p>
+ * Note: Only settings registered in {@link SettingsModule} can be changed dynamically.
+ * </p>
+ * <p>
+ * This method registers a compound updater that is useful if two settings are depending on each other. + * The consumer is always provided with both values even if only one of the two changes. + */ + public synchronized void addAffixUpdateConsumer(Setting.AffixSetting settingA, Setting.AffixSetting settingB, + BiConsumer> consumer, + BiConsumer> validator) { + // it would be awesome to have a generic way to do that ie. a set of settings that map to an object with a builder + // down the road this would be nice to have! + ensureSettingIsRegistered(settingA); + ensureSettingIsRegistered(settingB); + SettingUpdater, A>> affixUpdaterA = settingA.newAffixUpdater((a,b)-> {}, logger, (a,b)-> {}); + SettingUpdater, B>> affixUpdaterB = settingB.newAffixUpdater((a,b)-> {}, logger, (a,b)-> {}); + + addSettingsUpdater(new SettingUpdater>>() { + + @Override + public boolean hasChanged(Settings current, Settings previous) { + return affixUpdaterA.hasChanged(current, previous) || affixUpdaterB.hasChanged(current, previous); + } + + @Override + public Map> getValue(Settings current, Settings previous) { + Map> map = new HashMap<>(); + BiConsumer aConsumer = (key, value) -> { + assert map.containsKey(key) == false : "duplicate key: " + key; + map.put(key, new Tuple<>(value, settingB.getConcreteSettingForNamespace(key).get(current))); + }; + BiConsumer bConsumer = (key, value) -> { + Tuple abTuple = map.get(key); + if (abTuple != null) { + map.put(key, new Tuple<>(abTuple.v1(), value)); + } else { + assert settingA.getConcreteSettingForNamespace(key).get(current).equals(settingA.getConcreteSettingForNamespace + (key).get(previous)) : "expected: " + settingA.getConcreteSettingForNamespace(key).get(current) + + " but was " + settingA.getConcreteSettingForNamespace(key).get(previous); + map.put(key, new Tuple<>(settingA.getConcreteSettingForNamespace(key).get(current), value)); + } + }; + SettingUpdater, A>> affixUpdaterA = settingA.newAffixUpdater(aConsumer, logger, (a,b) ->{}); + SettingUpdater, B>> affixUpdaterB = settingB.newAffixUpdater(bConsumer, logger, (a,b) ->{}); + affixUpdaterA.apply(current, previous); + affixUpdaterB.apply(current, previous); + for (Map.Entry> entry : map.entrySet()) { + validator.accept(entry.getKey(), entry.getValue()); + } + return Collections.unmodifiableMap(map); + } + + @Override + public void apply(Map> values, Settings current, Settings previous) { + for (Map.Entry> entry : values.entrySet()) { + consumer.accept(entry.getKey(), entry.getValue()); + } + } + }); + } + + private void ensureSettingIsRegistered(Setting.AffixSetting setting) { final Setting registeredSetting = this.complexMatchers.get(setting.getKey()); if (setting != registeredSetting) { throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]"); } - addSettingsUpdater(setting.newAffixUpdater(consumer, logger, validator)); } /** diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index de8691b3b6871..bf53a3dc01a7a 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -63,6 +63,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import 
org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.indices.IndicesQueryCache; @@ -264,6 +265,7 @@ public void apply(Settings value, Settings current, Settings previous) { HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING, HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING, + IndexModule.NODE_STORE_ALLOW_MMAPFS, ClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 94edb5a297afa..b98c2753d701b 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -547,7 +547,7 @@ public String toString() { }; } - static AbstractScopedSettings.SettingUpdater groupedSettingsUpdater(Consumer consumer, Logger logger, + static AbstractScopedSettings.SettingUpdater groupedSettingsUpdater(Consumer consumer, final List> configuredSettings) { return new AbstractScopedSettings.SettingUpdater() { diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index 715b78b14ffdb..7f2eae492fd56 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -21,10 +21,11 @@ import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.store.MMapDirectory; +import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; @@ -59,7 +60,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -84,8 +84,10 @@ */ public final class IndexModule { + public static final Setting NODE_STORE_ALLOW_MMAPFS = Setting.boolSetting("node.store.allow_mmapfs", true, Property.NodeScope); + public static final Setting INDEX_STORE_TYPE_SETTING = - new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope); + new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope); /** On which extensions to load data into the file-system cache upon opening of files. 
* This only works with the mmap directory, and even in that case is still @@ -289,7 +291,7 @@ IndexEventListener freeze() { // pkg private for testing } } - private static boolean isBuiltinType(String storeType) { + public static boolean isBuiltinType(String storeType) { for (Type type : Type.values()) { if (type.match(storeType)) { return true; @@ -298,21 +300,48 @@ private static boolean isBuiltinType(String storeType) { return false; } + public enum Type { - NIOFS, - MMAPFS, - SIMPLEFS, - FS; + NIOFS("niofs"), + MMAPFS("mmapfs"), + SIMPLEFS("simplefs"), + FS("fs"); + + private final String settingsKey; + + Type(final String settingsKey) { + this.settingsKey = settingsKey; + } + + private static final Map TYPES; + + static { + final Map types = new HashMap<>(4); + for (final Type type : values()) { + types.put(type.settingsKey, type); + } + TYPES = Collections.unmodifiableMap(types); + } public String getSettingsKey() { - return this.name().toLowerCase(Locale.ROOT); + return this.settingsKey; + } + + public static Type fromSettingsKey(final String key) { + final Type type = TYPES.get(key); + if (type == null) { + throw new IllegalArgumentException("no matching type for [" + key + "]"); + } + return type; } + /** * Returns true iff this settings matches the type. */ public boolean match(String setting) { return getSettingsKey().equals(setting); } + } /** @@ -325,6 +354,16 @@ public interface IndexSearcherWrapperFactory { IndexSearcherWrapper newWrapper(IndexService indexService); } + public static Type defaultStoreType(final boolean allowMmapfs) { + if (allowMmapfs && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) { + return Type.MMAPFS; + } else if (Constants.WINDOWS) { + return Type.SIMPLEFS; + } else { + return Type.NIOFS; + } + } + public IndexService newIndexService( NodeEnvironment environment, NamedXContentRegistry xContentRegistry, @@ -343,20 +382,7 @@ public IndexService newIndexService( IndexSearcherWrapperFactory searcherWrapperFactory = indexSearcherWrapper.get() == null ? 
(shard) -> null : indexSearcherWrapper.get(); eventListener.beforeIndexCreated(indexSettings.getIndex(), indexSettings.getSettings()); - final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING); - final IndexStore store; - if (Strings.isEmpty(storeType) || isBuiltinType(storeType)) { - store = new IndexStore(indexSettings); - } else { - Function factory = indexStoreFactories.get(storeType); - if (factory == null) { - throw new IllegalArgumentException("Unknown store type [" + storeType + "]"); - } - store = factory.apply(indexSettings); - if (store == null) { - throw new IllegalStateException("store must not be null"); - } - } + final IndexStore store = getIndexStore(indexSettings, indexStoreFactories); final QueryCache queryCache; if (indexSettings.getValue(INDEX_QUERY_CACHE_ENABLED_SETTING)) { BiFunction queryCacheProvider = forceQueryCacheProvider.get(); @@ -375,6 +401,39 @@ public IndexService newIndexService( indicesFieldDataCache, searchOperationListeners, indexOperationListeners, namedWriteableRegistry); } + private static IndexStore getIndexStore( + final IndexSettings indexSettings, final Map> indexStoreFactories) { + final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING); + final Type type; + final Boolean allowMmapfs = NODE_STORE_ALLOW_MMAPFS.get(indexSettings.getNodeSettings()); + if (storeType.isEmpty() || Type.FS.getSettingsKey().equals(storeType)) { + type = defaultStoreType(allowMmapfs); + } else { + if (isBuiltinType(storeType)) { + type = Type.fromSettingsKey(storeType); + } else { + type = null; + } + } + if (type != null && type == Type.MMAPFS && allowMmapfs == false) { + throw new IllegalArgumentException("store type [mmapfs] is not allowed"); + } + final IndexStore store; + if (storeType.isEmpty() || isBuiltinType(storeType)) { + store = new IndexStore(indexSettings); + } else { + Function factory = indexStoreFactories.get(storeType); + if (factory == null) { + throw new IllegalArgumentException("Unknown store type [" + storeType + "]"); + } + store = factory.apply(indexSettings); + if (store == null) { + throw new IllegalStateException("store must not be null"); + } + } + return store; + } + /** * creates a new mapper service to do administrative work like mapping updates. This *should not* be used for document parsing. * doing so will result in an exception. diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index edf00e63e49cf..020dac78d49e2 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -1642,10 +1642,12 @@ public interface Warmer { public abstract int fillSeqNoGaps(long primaryTerm) throws IOException; /** - * Performs recovery from the transaction log. + * Performs recovery from the transaction log up to {@code recoverUpToSeqNo} (inclusive). * This operation will close the engine if the recovery fails. + * + * @param recoverUpToSeqNo the upper bound, inclusive, of sequence number to be recovered */ - public abstract Engine recoverFromTranslog() throws IOException; + public abstract Engine recoverFromTranslog(long recoverUpToSeqNo) throws IOException; /** * Do not replay translog operations, but make the engine be ready. 
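The Engine.recoverFromTranslog signature change above threads an inclusive sequence-number upper bound through translog recovery. Two usage sketches: the first is taken from IndexShard later in this patch; the second is a hypothetical call site (engine and globalCheckpoint are assumed names, not from this diff):

```java
// Full recovery: replay every retained translog operation
// (what IndexShard#openEngineAndRecoverFromTranslog does below).
engine.recoverFromTranslog(Long.MAX_VALUE);

// Bounded recovery (hypothetical call site): stop once operations above a
// known-safe sequence number would be replayed.
engine.recoverFromTranslog(globalCheckpoint);
```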
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 7e39aacd43544..1baaaf2e1b14d 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -393,7 +393,7 @@ private void bootstrapAppendOnlyInfoFromWriter(IndexWriter writer) { } @Override - public InternalEngine recoverFromTranslog() throws IOException { + public InternalEngine recoverFromTranslog(long recoverUpToSeqNo) throws IOException { flushLock.lock(); try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); @@ -401,7 +401,7 @@ public InternalEngine recoverFromTranslog() throws IOException { throw new IllegalStateException("Engine has already been recovered"); } try { - recoverFromTranslogInternal(); + recoverFromTranslogInternal(recoverUpToSeqNo); } catch (Exception e) { try { pendingTranslogRecovery.set(true); // just play safe and never allow commits on this see #ensureCanFlush @@ -423,11 +423,12 @@ public void skipTranslogRecovery() { pendingTranslogRecovery.set(false); // we are good - now we can commit } - private void recoverFromTranslogInternal() throws IOException { + private void recoverFromTranslogInternal(long recoverUpToSeqNo) throws IOException { Translog.TranslogGeneration translogGeneration = translog.getGeneration(); final int opsRecovered; - final long translogGen = Long.parseLong(lastCommittedSegmentInfos.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); - try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGen)) { + final long translogFileGen = Long.parseLong(lastCommittedSegmentInfos.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), translogFileGen), recoverUpToSeqNo)) { opsRecovered = config().getTranslogRecoveryRunner().run(this, snapshot); } catch (Exception e) { throw new EngineException(shardId, "failed to recover from translog", e); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 9c334f795511d..20b4bb37cc7aa 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; @@ -166,7 +167,7 @@ public Mapper.Builder parse(String name, Map node, ParserCo builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1)); iterator.remove(); } else if (propName.equals("norms")) { - builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode, "norms") == false); + TypeParsers.parseNorms(builder, name, propNode); iterator.remove(); } else if (propName.equals("eager_global_ordinals")) { builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(propNode, "eager_global_ordinals")); @@ -256,8 +257,10 @@ public void setSplitQueriesOnWhitespace(boolean splitQueriesOnWhitespace) { public Query existsQuery(QueryShardContext context) { if (hasDocValues()) { return new 
DocValuesFieldExistsQuery(name()); - } else { + } else if (omitNorms()) { return new TermQuery(new Term(FieldNamesFieldMapper.NAME, name())); + } else { + return new NormsFieldExistsQuery(name()); } } @@ -366,17 +369,19 @@ protected void parseCreateField(ParseContext context, List field // convert to utf8 only once before feeding postings/dv/stored fields final BytesRef binaryValue = new BytesRef(value); - if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { + if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { Field field = new Field(fieldType().name(), binaryValue, fieldType()); fields.add(field); + + if (fieldType().hasDocValues() == false && fieldType().omitNorms()) { + createFieldNamesField(context, fields); + } } + if (fieldType().hasDocValues()) { fields.add(new SortedSetDocValuesField(fieldType().name(), binaryValue)); - } else if (fieldType().stored() || fieldType().indexOptions() != IndexOptions.NONE) { - createFieldNamesField(context, fields); } } - @Override protected String contentType() { return CONTENT_TYPE; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 5f3f4a4de49d6..4a3fa852e7f7d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -35,6 +35,7 @@ import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.joda.DateMathParser; @@ -314,7 +315,13 @@ public boolean isAggregatable() { /** Generates a query that will only match documents that contain the given value. * The default implementation returns a {@link TermQuery} over the value bytes, * boosted by {@link #boost()}. - * @throws IllegalArgumentException if {@code value} cannot be converted to the expected data type */ + * @throws IllegalArgumentException if {@code value} cannot be converted to the expected data type or if the field is not searchable + * due to the way it is configured (eg. not indexed) + * @throws ElasticsearchParseException if {@code value} cannot be converted to the expected data type + * @throws UnsupportedOperationException if the field is not searchable regardless of options + * @throws QueryShardException if the field is not searchable regardless of options + */ + // TODO: Standardize exception types public abstract Query termQuery(Object value, @Nullable QueryShardContext context); /** Build a constant-scoring query that matches all values. 
The default implementation uses a diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index a6a5fab0d04f9..667f4a736173d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -122,8 +122,7 @@ private static void parseAnalyzersAndTermVectors(FieldMapper.Builder builder, St } } - public static void parseNorms(FieldMapper.Builder builder, String fieldName, Object propNode, - Mapper.TypeParser.ParserContext parserContext) { + public static void parseNorms(FieldMapper.Builder builder, String fieldName, Object propNode) { builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode, fieldName + ".norms") == false); } @@ -140,7 +139,7 @@ public static void parseTextField(FieldMapper.Builder builder, String name, Map< final String propName = entry.getKey(); final Object propNode = entry.getValue(); if ("norms".equals(propName)) { - parseNorms(builder, name, propNode, parserContext); + parseNorms(builder, name, propNode); iterator.remove(); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java index 8b2db374c8da9..894a886182d35 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java @@ -150,13 +150,7 @@ public InnerHitBuilder(String name) { */ public InnerHitBuilder(StreamInput in) throws IOException { name = in.readOptionalString(); - if (in.getVersion().before(Version.V_5_5_0)) { - in.readOptionalString(); - in.readOptionalString(); - } - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - ignoreUnmapped = in.readBoolean(); - } + ignoreUnmapped = in.readBoolean(); from = in.readVInt(); size = in.readVInt(); explain = in.readBoolean(); @@ -191,14 +185,6 @@ public InnerHitBuilder(StreamInput in) throws IOException { } } highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new); - if (in.getVersion().before(Version.V_5_5_0)) { - /** - * this is needed for BWC with nodes pre 5.5 - */ - in.readNamedWriteable(QueryBuilder.class); - boolean hasChildren = in.readBoolean(); - assert hasChildren == false; - } if (in.getVersion().onOrAfter(Version.V_6_4_0)) { this.innerCollapseBuilder = in.readOptionalWriteable(CollapseBuilder::new); } @@ -206,9 +192,6 @@ public InnerHitBuilder(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(Version.V_5_5_0)) { - throw new IOException("Invalid output version, must >= " + Version.V_5_5_0.toString()); - } out.writeOptionalString(name); out.writeBoolean(ignoreUnmapped); out.writeVInt(from); @@ -252,84 +235,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - /** - * BWC serialization for nested {@link InnerHitBuilder}. - * Should only be used to send nested inner hits to nodes pre 5.5. - */ - protected void writeToNestedBWC(StreamOutput out, QueryBuilder query, String nestedPath) throws IOException { - assert out.getVersion().before(Version.V_5_5_0) : - "invalid output version, must be < " + Version.V_5_5_0.toString(); - writeToBWC(out, query, nestedPath, null); - } - - /** - * BWC serialization for collapsing {@link InnerHitBuilder}. - * Should only be used to send collapsing inner hits to nodes pre 5.5. 
- */ - public void writeToCollapseBWC(StreamOutput out) throws IOException { - assert out.getVersion().before(Version.V_5_5_0) : - "invalid output version, must be < " + Version.V_5_5_0.toString(); - writeToBWC(out, new MatchAllQueryBuilder(), null, null); - } - - /** - * BWC serialization for parent/child {@link InnerHitBuilder}. - * Should only be used to send hasParent or hasChild inner hits to nodes pre 5.5. - */ - public void writeToParentChildBWC(StreamOutput out, QueryBuilder query, String parentChildPath) throws IOException { - assert(out.getVersion().before(Version.V_5_5_0)) : - "invalid output version, must be < " + Version.V_5_5_0.toString(); - writeToBWC(out, query, null, parentChildPath); - } - - private void writeToBWC(StreamOutput out, - QueryBuilder query, - String nestedPath, - String parentChildPath) throws IOException { - out.writeOptionalString(name); - if (nestedPath != null) { - out.writeOptionalString(nestedPath); - out.writeOptionalString(null); - } else { - out.writeOptionalString(null); - out.writeOptionalString(parentChildPath); - } - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeBoolean(ignoreUnmapped); - } - out.writeVInt(from); - out.writeVInt(size); - out.writeBoolean(explain); - out.writeBoolean(version); - out.writeBoolean(trackScores); - out.writeOptionalWriteable(storedFieldsContext); - out.writeGenericValue(docValueFields == null - ? null - : docValueFields.stream().map(ff -> ff.field).collect(Collectors.toList())); - boolean hasScriptFields = scriptFields != null; - out.writeBoolean(hasScriptFields); - if (hasScriptFields) { - out.writeVInt(scriptFields.size()); - Iterator iterator = scriptFields.stream() - .sorted(Comparator.comparing(ScriptField::fieldName)).iterator(); - while (iterator.hasNext()) { - iterator.next().writeTo(out); - } - } - out.writeOptionalWriteable(fetchSourceContext); - boolean hasSorts = sorts != null; - out.writeBoolean(hasSorts); - if (hasSorts) { - out.writeVInt(sorts.size()); - for (SortBuilder sort : sorts) { - out.writeNamedWriteable(sort); - } - } - out.writeOptionalWriteable(highlightBuilder); - out.writeNamedWriteable(query); - out.writeBoolean(false); - } - public String getName() { return name; } diff --git a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java index 0de474f8b9901..950c9e052adae 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.termvectors.MultiTermVectorsItemResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; @@ -47,7 +46,6 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; @@ -220,11 +218,7 @@ public Item(@Nullable String index, @Nullable String type, XContentBuilder doc) type = 
in.readOptionalString(); if (in.readBoolean()) { doc = (BytesReference) in.readGenericValue(); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - xContentType = in.readEnum(XContentType.class); - } else { - xContentType = XContentHelper.xContentType(doc); - } + xContentType = in.readEnum(XContentType.class); } else { id = in.readString(); } @@ -242,9 +236,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(doc != null); if (doc != null) { out.writeGenericValue(doc); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); } else { out.writeString(id); } diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 889f41a037f86..8d7c0190eb210 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -32,7 +32,6 @@ import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ParentChildrenBlockJoinQuery; import org.apache.lucene.search.join.ScoreMode; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -103,15 +102,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(path); out.writeVInt(scoreMode.ordinal()); out.writeNamedWriteable(query); - if (out.getVersion().before(Version.V_5_5_0)) { - final boolean hasInnerHit = innerHitBuilder != null; - out.writeBoolean(hasInnerHit); - if (hasInnerHit) { - innerHitBuilder.writeToNestedBWC(out, query, path); - } - } else { - out.writeOptionalWriteable(innerHitBuilder); - } + out.writeOptionalWriteable(innerHitBuilder); out.writeBoolean(ignoreUnmapped); } @@ -398,7 +389,7 @@ public TopDocs[] topDocs(SearchHit[] hits) throws IOException { if (size() == 0) { TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx); - result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0); + result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, Float.NaN); } else { int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); TopDocsCollector topDocsCollector; diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index e9d53d8e82948..0289ce6f6ae44 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -175,9 +175,6 @@ public QueryStringQueryBuilder(StreamInput in) throws IOException { analyzer = in.readOptionalString(); quoteAnalyzer = in.readOptionalString(); quoteFieldSuffix = in.readOptionalString(); - if (in.getVersion().before(Version.V_6_0_0_beta1)) { - in.readBoolean(); // auto_generate_phrase_query - } allowLeadingWildcard = in.readOptionalBoolean(); analyzeWildcard = in.readOptionalBoolean(); enablePositionIncrements = in.readBoolean(); @@ -186,27 +183,15 @@ public QueryStringQueryBuilder(StreamInput in) throws IOException { fuzzyMaxExpansions = in.readVInt(); fuzzyRewrite = in.readOptionalString(); phraseSlop = in.readVInt(); - if 
(in.getVersion().before(Version.V_6_0_0_beta1)) { - in.readBoolean(); // use_dismax - tieBreaker = in.readFloat(); - type = DEFAULT_TYPE; - } else { - type = MultiMatchQueryBuilder.Type.readFromStream(in); - tieBreaker = in.readOptionalFloat(); - } + type = MultiMatchQueryBuilder.Type.readFromStream(in); + tieBreaker = in.readOptionalFloat(); + rewrite = in.readOptionalString(); minimumShouldMatch = in.readOptionalString(); lenient = in.readOptionalBoolean(); timeZone = in.readOptionalTimeZone(); escape = in.readBoolean(); maxDeterminizedStates = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_5_1_1) && in.getVersion().before(Version.V_6_0_0_beta1)) { - in.readBoolean(); // split_on_whitespace - Boolean useAllField = in.readOptionalBoolean(); - if (useAllField != null && useAllField) { - defaultField = "*"; - } - } if (in.getVersion().onOrAfter(Version.V_6_1_0)) { autoGenerateSynonymsPhraseQuery = in.readBoolean(); fuzzyTranspositions = in.readBoolean(); @@ -226,9 +211,6 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalString(this.analyzer); out.writeOptionalString(this.quoteAnalyzer); out.writeOptionalString(this.quoteFieldSuffix); - if (out.getVersion().before(Version.V_6_0_0_beta1)) { - out.writeBoolean(false); // auto_generate_phrase_query - } out.writeOptionalBoolean(this.allowLeadingWildcard); out.writeOptionalBoolean(this.analyzeWildcard); out.writeBoolean(this.enablePositionIncrements); @@ -237,24 +219,14 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(this.fuzzyMaxExpansions); out.writeOptionalString(this.fuzzyRewrite); out.writeVInt(this.phraseSlop); - if (out.getVersion().before(Version.V_6_0_0_beta1)) { - out.writeBoolean(true); // use_dismax - out.writeFloat(tieBreaker != null ? tieBreaker : 0.0f); - } else { - type.writeTo(out); - out.writeOptionalFloat(tieBreaker); - } + type.writeTo(out); + out.writeOptionalFloat(tieBreaker); out.writeOptionalString(this.rewrite); out.writeOptionalString(this.minimumShouldMatch); out.writeOptionalBoolean(this.lenient); out.writeOptionalTimeZone(timeZone); out.writeBoolean(this.escape); out.writeVInt(this.maxDeterminizedStates); - if (out.getVersion().onOrAfter(Version.V_5_1_1) && out.getVersion().before(Version.V_6_0_0_beta1)) { - out.writeBoolean(false); // split_on_whitespace - Boolean useAllFields = defaultField == null ? null : Regex.isMatchAllPattern(defaultField); - out.writeOptionalBoolean(useAllFields); - } if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeBoolean(autoGenerateSynonymsPhraseQuery); out.writeBoolean(fuzzyTranspositions); @@ -328,8 +300,9 @@ public Map fields() { /** * @param type Sets how multiple fields should be combined to build textual part queries. */ - public void type(MultiMatchQueryBuilder.Type type) { + public QueryStringQueryBuilder type(MultiMatchQueryBuilder.Type type) { this.type = type; + return this; } /** @@ -388,7 +361,7 @@ public QueryStringQueryBuilder analyzer(String analyzer) { this.analyzer = analyzer; return this; } - + /** * The optional analyzer used to analyze the query string. Note, if a field has search analyzer * defined for it, then it will be used automatically. Defaults to the smart search analyzer. 
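Most of the serialization hunks in this file (and in SimpleQueryStringBuilder, BulkByScrollTask and RemoteInfo below) follow one pattern: master no longer needs to speak the pre-6.0 wire format, so version-gated reads and writes collapse to their unconditional form. Schematically, using the auto_generate_phrase_query placeholder removed above:

```java
// Before: a placeholder value was written only for old nodes ...
if (out.getVersion().before(Version.V_6_0_0_beta1)) {
    out.writeBoolean(false); // auto_generate_phrase_query, ignored by 6.x readers
}
// ... and the matching read carried a fallback default. Since master only
// ever talks to >= 6.x-compatible nodes, both the version guard and the
// placeholder value can be dropped wholesale.
```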
@@ -899,9 +872,9 @@ protected boolean doEquals(QueryStringQueryBuilder other) { Objects.equals(tieBreaker, other.tieBreaker) && Objects.equals(rewrite, other.rewrite) && Objects.equals(minimumShouldMatch, other.minimumShouldMatch) && - Objects.equals(lenient, other.lenient) && + Objects.equals(lenient, other.lenient) && Objects.equals( - timeZone == null ? null : timeZone.getID(), + timeZone == null ? null : timeZone.getID(), other.timeZone == null ? null : other.timeZone.getID()) && Objects.equals(escape, other.escape) && Objects.equals(maxDeterminizedStates, other.maxDeterminizedStates) && diff --git a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index 6223254874d07..b297036f2f37b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -23,7 +23,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -108,14 +107,12 @@ public RangeQueryBuilder(StreamInput in) throws IOException { if (formatString != null) { format = Joda.forPattern(formatString); } - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - String relationString = in.readOptionalString(); - if (relationString != null) { - relation = ShapeRelation.getRelationByName(relationString); - if (relation != null && !isRelationAllowed(relation)) { - throw new IllegalArgumentException( - "[range] query does not support relation [" + relationString + "]"); - } + String relationString = in.readOptionalString(); + if (relationString != null) { + relation = ShapeRelation.getRelationByName(relationString); + if (relation != null && !isRelationAllowed(relation)) { + throw new IllegalArgumentException( + "[range] query does not support relation [" + relationString + "]"); } } } @@ -139,13 +136,11 @@ protected void doWriteTo(StreamOutput out) throws IOException { formatString = this.format.format(); } out.writeOptionalString(formatString); - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - String relationString = null; - if (this.relation != null) { - relationString = this.relation.getRelationName(); - } - out.writeOptionalString(relationString); + String relationString = null; + if (this.relation != null) { + relationString = this.relation.getRelationName(); } + out.writeOptionalString(relationString); } /** diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index 46a958b58fe28..473aa636caab0 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -168,27 +168,11 @@ public SimpleQueryStringBuilder(StreamInput in) throws IOException { flags = in.readInt(); analyzer = in.readOptionalString(); defaultOperator = Operator.readFromStream(in); - if (in.getVersion().before(Version.V_5_1_1)) { - in.readBoolean(); // lowercase_expanded_terms - } settings.lenient(in.readBoolean()); - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - this.lenientSet = in.readBoolean(); - } + this.lenientSet = in.readBoolean(); 
settings.analyzeWildcard(in.readBoolean()); - if (in.getVersion().before(Version.V_5_1_1)) { - in.readString(); // locale - } minimumShouldMatch = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - settings.quoteFieldSuffix(in.readOptionalString()); - if (in.getVersion().before(Version.V_6_0_0_beta2)) { - Boolean useAllFields = in.readOptionalBoolean(); - if (useAllFields != null && useAllFields) { - useAllFields(true); - } - } - } + settings.quoteFieldSuffix(in.readOptionalString()); if (in.getVersion().onOrAfter(Version.V_6_1_0)) { settings.autoGenerateSynonymsPhraseQuery(in.readBoolean()); settings.fuzzyPrefixLength(in.readVInt()); @@ -208,28 +192,11 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeInt(flags); out.writeOptionalString(analyzer); defaultOperator.writeTo(out); - if (out.getVersion().before(Version.V_5_1_1)) { - out.writeBoolean(true); // lowercase_expanded_terms - } out.writeBoolean(settings.lenient()); - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeBoolean(lenientSet); - } + out.writeBoolean(lenientSet); out.writeBoolean(settings.analyzeWildcard()); - if (out.getVersion().before(Version.V_5_1_1)) { - out.writeString(Locale.ROOT.toLanguageTag()); // locale - } out.writeOptionalString(minimumShouldMatch); - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeOptionalString(settings.quoteFieldSuffix()); - if (out.getVersion().before(Version.V_6_0_0_beta2)) { - if (useAllFields()) { - out.writeOptionalBoolean(true); - } else { - out.writeOptionalBoolean(null); - } - } - } + out.writeOptionalString(settings.quoteFieldSuffix()); if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeBoolean(settings.autoGenerateSynonymsPhraseQuery()); out.writeVInt(settings.fuzzyPrefixLength()); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java index 9ff26b13212c7..66e83907d4993 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -303,11 +302,7 @@ public Status(List sliceStatuses, @Nullable String reasonCanc } public Status(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - sliceId = in.readOptionalVInt(); - } else { - sliceId = null; - } + sliceId = in.readOptionalVInt(); total = in.readVLong(); updated = in.readVLong(); created = in.readVLong(); @@ -321,18 +316,12 @@ public Status(StreamInput in) throws IOException { requestsPerSecond = in.readFloat(); reasonCancelled = in.readOptionalString(); throttledUntil = in.readTimeValue(); - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - sliceStatuses = in.readList(stream -> stream.readOptionalWriteable(StatusOrException::new)); - } else { - sliceStatuses = emptyList(); - } + sliceStatuses = in.readList(stream -> stream.readOptionalWriteable(StatusOrException::new)); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeOptionalVInt(sliceId); - } + out.writeOptionalVInt(sliceId); out.writeVLong(total); 
out.writeVLong(updated); out.writeVLong(created); @@ -346,11 +335,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeFloat(requestsPerSecond); out.writeOptionalString(reasonCancelled); out.writeTimeValue(throttledUntil); - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeVInt(sliceStatuses.size()); - for (StatusOrException sliceStatus : sliceStatuses) { - out.writeOptionalWriteable(sliceStatus); - } + out.writeVInt(sliceStatuses.size()); + for (StatusOrException sliceStatus : sliceStatuses) { + out.writeOptionalWriteable(sliceStatus); } } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java b/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java index 70f79a9def605..3ebd261b5847c 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java @@ -92,13 +92,8 @@ public RemoteInfo(StreamInput in) throws IOException { headers.put(in.readString(), in.readString()); } this.headers = unmodifiableMap(headers); - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - socketTimeout = in.readTimeValue(); - connectTimeout = in.readTimeValue(); - } else { - socketTimeout = DEFAULT_SOCKET_TIMEOUT; - connectTimeout = DEFAULT_CONNECT_TIMEOUT; - } + socketTimeout = in.readTimeValue(); + connectTimeout = in.readTimeValue(); if (in.getVersion().onOrAfter(Version.V_6_4_0)) { pathPrefix = in.readOptionalString(); } else { @@ -119,10 +114,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(header.getKey()); out.writeString(header.getValue()); } - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeTimeValue(socketTimeout); - out.writeTimeValue(connectTimeout); - } + out.writeTimeValue(socketTimeout); + out.writeTimeValue(connectTimeout); if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeOptionalString(pathPrefix); } diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 8b33f2df8b16c..89cebf38a4013 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -120,7 +120,7 @@ public Query parseGroup(Type type, String field, Float boostValue, Object value, private Query combineGrouped(List groupQuery) { if (groupQuery == null || groupQuery.isEmpty()) { - return new MatchNoDocsQuery("[multi_match] list of group queries was empty"); + return zeroTermsQuery(); } if (groupQuery.size() == 1) { return groupQuery.get(0); diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java index df96ff87ec256..d3bac583eac68 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java @@ -19,47 +19,21 @@ package org.elasticsearch.index.search; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.IpFieldMapper; -import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; -import 
org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MetadataFieldMapper; -import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.QueryShardException; import java.util.Collection; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; /** * Helpers to extract and expand field names and boosts */ public final class QueryParserHelper { - // Mapping types the "all-ish" query can be executed against - // TODO: Fix the API so that we don't need a hardcoded list of types - private static final Set ALLOWED_QUERY_MAPPER_TYPES; - - static { - ALLOWED_QUERY_MAPPER_TYPES = new HashSet<>(); - ALLOWED_QUERY_MAPPER_TYPES.add(DateFieldMapper.CONTENT_TYPE); - ALLOWED_QUERY_MAPPER_TYPES.add(IpFieldMapper.CONTENT_TYPE); - ALLOWED_QUERY_MAPPER_TYPES.add(KeywordFieldMapper.CONTENT_TYPE); - for (NumberFieldMapper.NumberType nt : NumberFieldMapper.NumberType.values()) { - ALLOWED_QUERY_MAPPER_TYPES.add(nt.typeName()); - } - ALLOWED_QUERY_MAPPER_TYPES.add("scaled_float"); - ALLOWED_QUERY_MAPPER_TYPES.add(TextFieldMapper.CONTENT_TYPE); - } - private QueryParserHelper() {} /** @@ -85,22 +59,6 @@ public static Map parseFieldsAndWeights(List fields) { return fieldsAndWeights; } - /** - * Get a {@link FieldMapper} associated with a field name or null. - * @param mapperService The mapper service where to find the mapping. - * @param field The field name to search. - */ - public static Mapper getFieldMapper(MapperService mapperService, String field) { - DocumentMapper mapper = mapperService.documentMapper(); - if (mapper != null) { - Mapper fieldMapper = mapper.mappers().getMapper(field); - if (fieldMapper != null) { - return fieldMapper; - } - } - return null; - } - public static Map resolveMappingFields(QueryShardContext context, Map fieldsAndWeights) { return resolveMappingFields(context, fieldsAndWeights, null); @@ -138,8 +96,7 @@ public static Map resolveMappingFields(QueryShardContext context, * @param fieldOrPattern The field name or the pattern to resolve * @param weight The weight for the field * @param acceptAllTypes Whether all field type should be added when a pattern is expanded. - * If false, only {@link #ALLOWED_QUERY_MAPPER_TYPES} are accepted and other field types - * are discarded from the query. + * If false, only searchable field types are added. * @param acceptMetadataField Whether metadata fields should be added when a pattern is expanded. */ public static Map resolveMappingField(QueryShardContext context, String fieldOrPattern, float weight, @@ -154,8 +111,7 @@ public static Map resolveMappingField(QueryShardContext context, * @param fieldOrPattern The field name or the pattern to resolve * @param weight The weight for the field * @param acceptAllTypes Whether all field type should be added when a pattern is expanded. - * If false, only {@link #ALLOWED_QUERY_MAPPER_TYPES} are accepted and other field types - * are discarded from the query. + * If false, only searchable field types are added. * @param acceptMetadataField Whether metadata fields should be added when a pattern is expanded. * @param fieldSuffix The suffix name to add to the expanded field names if a mapping exists for that name. 
* The original name of the field is kept if adding the suffix to the field name does not point to a valid field @@ -177,18 +133,20 @@ public static Map resolveMappingField(QueryShardContext context, continue; } - // Ignore fields that are not in the allowed mapper types. Some - // types do not support term queries, and thus we cannot generate - // a special query for them. - String mappingType = fieldType.typeName(); - if (acceptAllTypes == false && ALLOWED_QUERY_MAPPER_TYPES.contains(mappingType) == false) { + if (acceptMetadataField == false && fieldType.name().startsWith("_")) { + // Ignore metadata fields continue; } - // Ignore metadata fields. - Mapper mapper = getFieldMapper(context.getMapperService(), fieldName); - if (acceptMetadataField == false && mapper instanceof MetadataFieldMapper) { - continue; + if (acceptAllTypes == false) { + try { + fieldType.termQuery("", context); + } catch (QueryShardException |UnsupportedOperationException e) { + // field type is never searchable with term queries (eg. geo point): ignore + continue; + } catch (IllegalArgumentException |ElasticsearchParseException e) { + // other exceptions are parsing errors or not indexed fields: keep + } } fields.put(fieldName, weight); } diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index 50406ed583487..fa2fd033bee0d 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -280,14 +280,14 @@ protected Query newMatchAllDocsQuery() { @Override public Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException { - if (quoted) { - return getFieldQuery(field, queryText, getPhraseSlop()); - } - if (field != null && EXISTS_FIELD.equals(field)) { return existsQuery(queryText); } + if (quoted) { + return getFieldQuery(field, queryText, getPhraseSlop()); + } + // Detects additional operators '<', '<=', '>', '>=' to handle range query with one side unbounded. // It is required to use a prefix field operator to enable the detection since they are not treated // as logical operator by the query parser (e.g. age:>=10). 
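The QueryParserHelper rewrite above replaces the hardcoded ALLOWED_QUERY_MAPPER_TYPES whitelist with a behavioral probe: attempt a term query against the field type and classify the field by the exception it throws. Restating the loop body from the hunk as a sketch:

```java
try {
    fieldType.termQuery("", context);
} catch (QueryShardException | UnsupportedOperationException e) {
    // the field can never run a term query (e.g. geo_point): drop it
    continue;
} catch (IllegalArgumentException | ElasticsearchParseException e) {
    // "" merely failed to parse (e.g. numeric or ip fields): the field is
    // searchable with a well-formed value, so keep it
}
fields.put(fieldName, weight);
```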
@@ -333,6 +333,10 @@ public Query getFieldQuery(String field, String queryText, boolean quoted) throw @Override protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException { + if (field != null && EXISTS_FIELD.equals(field)) { + return existsQuery(queryText); + } + Map fields = extractMultiFields(field, true); if (fields.isEmpty()) { return newUnmappedFieldQuery(field); @@ -347,6 +351,9 @@ protected Query getFieldQuery(String field, String queryText, int slop) throws P } queryBuilder.setPhraseSlop(slop); Query query = queryBuilder.parse(MultiMatchQueryBuilder.Type.PHRASE, fields, queryText, null); + if (query == null) { + return null; + } return applySlop(query, slop); } catch (IOException e) { throw new ParseException(e.getMessage()); diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 25073d7ce15a1..aabdd742303e7 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1314,7 +1314,7 @@ int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot) throws IOExce **/ public void openEngineAndRecoverFromTranslog() throws IOException { innerOpenEngineAndTranslog(); - getEngine().recoverFromTranslog(); + getEngine().recoverFromTranslog(Long.MAX_VALUE); } /** diff --git a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index fc60543006648..f95cdb3a9f692 100644 --- a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.store; import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.FileSwitchDirectory; import org.apache.lucene.store.LockFactory; import org.apache.lucene.store.MMapDirectory; @@ -77,10 +76,21 @@ public Directory newDirectory() throws IOException { } protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - final String storeType = indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), - IndexModule.Type.FS.getSettingsKey()); + final String storeType = + indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()); if (IndexModule.Type.FS.match(storeType)) { - return FSDirectory.open(location, lockFactory); // use lucene defaults + final IndexModule.Type type = + IndexModule.defaultStoreType(IndexModule.NODE_STORE_ALLOW_MMAPFS.get(indexSettings.getNodeSettings())); + switch (type) { + case MMAPFS: + return new MMapDirectory(location, lockFactory); + case SIMPLEFS: + return new SimpleFSDirectory(location, lockFactory); + case NIOFS: + return new NIOFSDirectory(location, lockFactory); + default: + throw new AssertionError("unexpected built-in store type [" + type + "]"); + } } else if (IndexModule.Type.SIMPLEFS.match(storeType)) { return new SimpleFSDirectory(location, lockFactory); } else if (IndexModule.Type.NIOFS.match(storeType)) { diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index bc77626b94277..43f1a278f54c3 100644 --- 
a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java
+++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java
@@ -45,6 +45,7 @@
 import org.elasticsearch.index.mapper.KeywordFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.ObjectMapper;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
@@ -160,7 +161,7 @@ private static void handleFieldWildcards(IndexShard indexShard, TermVectorsReque
         request.selectedFields(fieldNames.toArray(Strings.EMPTY_ARRAY));
     }
 
-    private static boolean isValidField(MappedFieldType fieldType) {
+    private static boolean isValidField(MappedFieldType fieldType, IndexShard indexShard) {
         // must be a string
         if (fieldType instanceof StringFieldType == false) {
             return false;
@@ -169,6 +170,16 @@ private static boolean isValidField(MappedFieldType fieldType) {
         if (fieldType.indexOptions() == IndexOptions.NONE) {
             return false;
         }
+        // and must not be under a nested field
+        int dotIndex = fieldType.name().indexOf('.');
+        while (dotIndex > -1) {
+            String parentField = fieldType.name().substring(0, dotIndex);
+            ObjectMapper mapper = indexShard.mapperService().getObjectMapper(parentField);
+            if (mapper != null && mapper.nested().isNested()) {
+                return false;
+            }
+            dotIndex = fieldType.name().indexOf('.', dotIndex + 1);
+        }
         return true;
     }
 
@@ -177,7 +188,7 @@ private static Fields addGeneratedTermVectors(IndexShard indexShard, Engine.GetR
         Set<String> validFields = new HashSet<>();
         for (String field : selectedFields) {
             MappedFieldType fieldType = indexShard.mapperService().fullName(field);
-            if (!isValidField(fieldType)) {
+            if (isValidField(fieldType, indexShard) == false) {
                 continue;
             }
             // already retrieved, only if the analyzer hasn't been overridden at the field
@@ -284,7 +295,7 @@ private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVect
         Collection<String> documentFields = new HashSet<>();
         for (IndexableField field : doc.getFields()) {
             MappedFieldType fieldType = indexShard.mapperService().fullName(field.name());
-            if (!isValidField(fieldType)) {
+            if (isValidField(fieldType, indexShard) == false) {
                 continue;
             }
             if (request.selectedFields() != null && !request.selectedFields().contains(field.name())) {
diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
index af6cb6c81cb50..f17acac37896d 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
@@ -577,21 +577,27 @@ public long getLastSyncedGlobalCheckpoint() {
      */
     public Snapshot newSnapshot() throws IOException {
         try (ReleasableLock ignored = readLock.acquire()) {
-            return newSnapshotFromGen(getMinFileGeneration());
+            return newSnapshotFromGen(new TranslogGeneration(translogUUID, getMinFileGeneration()), Long.MAX_VALUE);
         }
     }
 
-    public Snapshot newSnapshotFromGen(long minGeneration) throws IOException {
+    public Snapshot newSnapshotFromGen(TranslogGeneration fromGeneration, long upToSeqNo) throws IOException {
         try (ReleasableLock ignored = readLock.acquire()) {
             ensureOpen();
-            if (minGeneration < getMinFileGeneration()) {
-                throw new IllegalArgumentException("requested snapshot generation [" + minGeneration + "] is not available. 
" + + final long fromFileGen = fromGeneration.translogFileGeneration; + if (fromFileGen < getMinFileGeneration()) { + throw new IllegalArgumentException("requested snapshot generation [" + fromFileGen + "] is not available. " + "Min referenced generation is [" + getMinFileGeneration() + "]"); } TranslogSnapshot[] snapshots = Stream.concat(readers.stream(), Stream.of(current)) - .filter(reader -> reader.getGeneration() >= minGeneration) + .filter(reader -> reader.getGeneration() >= fromFileGen && reader.getCheckpoint().minSeqNo <= upToSeqNo) .map(BaseTranslogReader::newSnapshot).toArray(TranslogSnapshot[]::new); - return newMultiSnapshot(snapshots); + final Snapshot snapshot = newMultiSnapshot(snapshots); + if (upToSeqNo == Long.MAX_VALUE) { + return snapshot; + } else { + return new SeqNoFilterSnapshot(snapshot, Long.MIN_VALUE, upToSeqNo); + } } } @@ -926,7 +932,59 @@ default int overriddenOperations() { * Returns the next operation in the snapshot or null if we reached the end. */ Translog.Operation next() throws IOException; + } + + /** + * A filtered snapshot consisting of only operations whose sequence numbers are in the given range + * between {@code fromSeqNo} (inclusive) and {@code toSeqNo} (inclusive). This filtered snapshot + * shares the same underlying resources with the {@code delegate} snapshot, therefore we should not + * use the {@code delegate} after passing it to this filtered snapshot. + */ + static final class SeqNoFilterSnapshot implements Snapshot { + private final Snapshot delegate; + private int filteredOpsCount; + private final long fromSeqNo; // inclusive + private final long toSeqNo; // inclusive + SeqNoFilterSnapshot(Snapshot delegate, long fromSeqNo, long toSeqNo) { + assert fromSeqNo <= toSeqNo : "from_seq_no[" + fromSeqNo + "] > to_seq_no[" + toSeqNo + "]"; + this.delegate = delegate; + this.fromSeqNo = fromSeqNo; + this.toSeqNo = toSeqNo; + } + + @Override + public int totalOperations() { + return delegate.totalOperations(); + } + + @Override + public int skippedOperations() { + return filteredOpsCount + delegate.skippedOperations(); + } + + @Override + public int overriddenOperations() { + return delegate.overriddenOperations(); + } + + @Override + public Operation next() throws IOException { + Translog.Operation op; + while ((op = delegate.next()) != null) { + if (fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo) { + return op; + } else { + filteredOpsCount++; + } + } + return null; + } + + @Override + public void close() throws IOException { + delegate.close(); + } } /** diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 5c097ba774f4a..1c83a880511cd 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -228,6 +228,14 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, indicesRequestCache, logger, threadPool, this.cleanInterval); this.metaStateService = metaStateService; this.engineFactoryProviders = engineFactoryProviders; + + // do not allow any plugin-provided index store type to conflict with a built-in type + for (final String indexStoreType : indexStoreFactories.keySet()) { + if (IndexModule.isBuiltinType(indexStoreType)) { + throw new IllegalStateException("registered index store type [" + indexStoreType + "] conflicts with a built-in type"); + } + } + 
this.indexStoreFactories = indexStoreFactories; } diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index f01b4bb312174..fb7885a217e01 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -560,9 +560,6 @@ static final class PreSyncedFlushResponse extends TransportResponse { } boolean includeNumDocs(Version version) { - if (version.major == Version.V_5_6_8.major) { - return version.onOrAfter(Version.V_5_6_8); - } return version.onOrAfter(Version.V_6_2_2); } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 01bc402e43ba5..ae3416ef3b06d 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -22,14 +22,42 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.concurrent.ScheduledFuture; -import java.util.function.BiFunction; - -import org.elasticsearch.common.settings.Settings; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.DeletePipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateApplier; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.metrics.MeanMetric; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.env.Environment; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.index.VersionType; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.script.ScriptService; @@ -38,20 +66,40 @@ /** * Holder class for several ingest related services. 
 */
-public class IngestService {
+public class IngestService implements ClusterStateApplier {
 
     public static final String NOOP_PIPELINE_NAME = "_none";
 
-    private final PipelineStore pipelineStore;
-    private final PipelineExecutionService pipelineExecutionService;
+    private final ClusterService clusterService;
+    private final Map<String, Processor.Factory> processorFactories;
+    // Ideally this should be in IngestMetadata class, but we don't have the processor factories around there.
+    // We know of all the processor factories when a node with all its plugins has been initialized. Also some
+    // processor factories rely on other node services. Custom metadata is statically registered when classes
+    // are loaded, so in the cluster state we just save the pipeline config and here we keep the actual pipelines around.
+    private volatile Map<String, Pipeline> pipelines = new HashMap<>();
+    private final ThreadPool threadPool;
+    private final StatsHolder totalStats = new StatsHolder();
+    private volatile Map<String, StatsHolder> statsHolderPerPipeline = Collections.emptyMap();
 
-    public IngestService(Settings settings, ThreadPool threadPool,
+    public IngestService(ClusterService clusterService, ThreadPool threadPool,
                          Environment env, ScriptService scriptService, AnalysisRegistry analysisRegistry,
                          List<IngestPlugin> ingestPlugins) {
-        BiFunction<Long, Runnable, ScheduledFuture<?>> scheduler =
-            (delay, command) -> threadPool.schedule(TimeValue.timeValueMillis(delay), ThreadPool.Names.GENERIC, command);
-        Processor.Parameters parameters = new Processor.Parameters(env, scriptService, analysisRegistry,
-            threadPool.getThreadContext(), threadPool::relativeTimeInMillis, scheduler);
+        this.clusterService = clusterService;
+        this.processorFactories = processorFactories(
+            ingestPlugins,
+            new Processor.Parameters(
+                env, scriptService, analysisRegistry,
+                threadPool.getThreadContext(), threadPool::relativeTimeInMillis,
+                (delay, command) -> threadPool.schedule(
+                    TimeValue.timeValueMillis(delay), ThreadPool.Names.GENERIC, command
+                )
+            )
+        );
+        this.threadPool = threadPool;
+    }
+
+    private static Map<String, Processor.Factory> processorFactories(List<IngestPlugin> ingestPlugins,
+                                                                     Processor.Parameters parameters) {
         Map<String, Processor.Factory> processorFactories = new HashMap<>();
         for (IngestPlugin ingestPlugin : ingestPlugins) {
             Map<String, Processor.Factory> newProcessors = ingestPlugin.getProcessors(parameters);
@@ -61,24 +109,385 @@ public IngestService(Settings settings, ThreadPool threadPool,
                 }
             }
         }
-        this.pipelineStore = new PipelineStore(settings, Collections.unmodifiableMap(processorFactories));
-        this.pipelineExecutionService = new PipelineExecutionService(pipelineStore, threadPool);
+        return Collections.unmodifiableMap(processorFactories);
+    }
+
+    public ClusterService getClusterService() {
+        return clusterService;
+    }
+
+    /**
+     * Deletes the pipeline specified by id in the request.
+ */ + public void delete(DeletePipelineRequest request, ActionListener listener) { + clusterService.submitStateUpdateTask("delete-pipeline-" + request.getId(), + new AckedClusterStateUpdateTask(request, listener) { + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) { + return innerDelete(request, currentState); + } + }); + } + + static ClusterState innerDelete(DeletePipelineRequest request, ClusterState currentState) { + IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); + if (currentIngestMetadata == null) { + return currentState; + } + Map pipelines = currentIngestMetadata.getPipelines(); + Set toRemove = new HashSet<>(); + for (String pipelineKey : pipelines.keySet()) { + if (Regex.simpleMatch(request.getId(), pipelineKey)) { + toRemove.add(pipelineKey); + } + } + if (toRemove.isEmpty() && Regex.isMatchAllPattern(request.getId()) == false) { + throw new ResourceNotFoundException("pipeline [{}] is missing", request.getId()); + } else if (toRemove.isEmpty()) { + return currentState; + } + final Map pipelinesCopy = new HashMap<>(pipelines); + for (String key : toRemove) { + pipelinesCopy.remove(key); + } + ClusterState.Builder newState = ClusterState.builder(currentState); + newState.metaData(MetaData.builder(currentState.getMetaData()) + .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelinesCopy)) + .build()); + return newState.build(); } - public PipelineStore getPipelineStore() { - return pipelineStore; + /** + * @return pipeline configuration specified by id. If multiple ids or wildcards are specified multiple pipelines + * may be returned + */ + // Returning PipelineConfiguration instead of Pipeline, because Pipeline and Processor interface don't + // know how to serialize themselves. + public static List getPipelines(ClusterState clusterState, String... ids) { + IngestMetadata ingestMetadata = clusterState.getMetaData().custom(IngestMetadata.TYPE); + return innerGetPipelines(ingestMetadata, ids); } - public PipelineExecutionService getPipelineExecutionService() { - return pipelineExecutionService; + static List innerGetPipelines(IngestMetadata ingestMetadata, String... ids) { + if (ingestMetadata == null) { + return Collections.emptyList(); + } + + // if we didn't ask for _any_ ID, then we get them all (this is the same as if they ask for '*') + if (ids.length == 0) { + return new ArrayList<>(ingestMetadata.getPipelines().values()); + } + + List result = new ArrayList<>(ids.length); + for (String id : ids) { + if (Regex.isSimpleMatchPattern(id)) { + for (Map.Entry entry : ingestMetadata.getPipelines().entrySet()) { + if (Regex.simpleMatch(id, entry.getKey())) { + result.add(entry.getValue()); + } + } + } else { + PipelineConfiguration pipeline = ingestMetadata.getPipelines().get(id); + if (pipeline != null) { + result.add(pipeline); + } + } + } + return result; + } + + /** + * Stores the specified pipeline definition in the request. 
+ */ + public void putPipeline(Map ingestInfos, PutPipelineRequest request, + ActionListener listener) throws Exception { + // validates the pipeline and processor configuration before submitting a cluster update task: + validatePipeline(ingestInfos, request); + clusterService.submitStateUpdateTask("put-pipeline-" + request.getId(), + new AckedClusterStateUpdateTask(request, listener) { + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) { + return innerPut(request, currentState); + } + }); + } + + /** + * Returns the pipeline by the specified id + */ + public Pipeline getPipeline(String id) { + return pipelines.get(id); + } + + public Map getProcessorFactories() { + return processorFactories; } public IngestInfo info() { - Map processorFactories = pipelineStore.getProcessorFactories(); + Map processorFactories = getProcessorFactories(); List processorInfoList = new ArrayList<>(processorFactories.size()); for (Map.Entry entry : processorFactories.entrySet()) { processorInfoList.add(new ProcessorInfo(entry.getKey())); } return new IngestInfo(processorInfoList); } + + Map pipelines() { + return pipelines; + } + + @Override + public void applyClusterState(final ClusterChangedEvent event) { + ClusterState state = event.state(); + innerUpdatePipelines(event.previousState(), state); + IngestMetadata ingestMetadata = state.getMetaData().custom(IngestMetadata.TYPE); + if (ingestMetadata != null) { + updatePipelineStats(ingestMetadata); + } + } + + private static Pipeline substitutePipeline(String id, ElasticsearchParseException e) { + String tag = e.getHeaderKeys().contains("processor_tag") ? e.getHeader("processor_tag").get(0) : null; + String type = e.getHeaderKeys().contains("processor_type") ? 
e.getHeader("processor_type").get(0) : "unknown"; + String errorMessage = "pipeline with id [" + id + "] could not be loaded, caused by [" + e.getDetailedMessage() + "]"; + Processor failureProcessor = new AbstractProcessor(tag) { + @Override + public void execute(IngestDocument ingestDocument) { + throw new IllegalStateException(errorMessage); + } + + @Override + public String getType() { + return type; + } + }; + String description = "this is a place holder pipeline, because pipeline with id [" + id + "] could not be loaded"; + return new Pipeline(id, description, null, new CompoundProcessor(failureProcessor)); + } + + static ClusterState innerPut(PutPipelineRequest request, ClusterState currentState) { + IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); + Map pipelines; + if (currentIngestMetadata != null) { + pipelines = new HashMap<>(currentIngestMetadata.getPipelines()); + } else { + pipelines = new HashMap<>(); + } + + pipelines.put(request.getId(), new PipelineConfiguration(request.getId(), request.getSource(), request.getXContentType())); + ClusterState.Builder newState = ClusterState.builder(currentState); + newState.metaData(MetaData.builder(currentState.getMetaData()) + .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelines)) + .build()); + return newState.build(); + } + + void validatePipeline(Map ingestInfos, PutPipelineRequest request) throws Exception { + if (ingestInfos.isEmpty()) { + throw new IllegalStateException("Ingest info is empty"); + } + + Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); + Pipeline pipeline = Pipeline.create(request.getId(), pipelineConfig, processorFactories); + List exceptions = new ArrayList<>(); + for (Processor processor : pipeline.flattenAllProcessors()) { + for (Map.Entry entry : ingestInfos.entrySet()) { + if (entry.getValue().containsProcessor(processor.getType()) == false) { + String message = "Processor type [" + processor.getType() + "] is not installed on node [" + entry.getKey() + "]"; + exceptions.add( + ConfigurationUtils.newConfigurationException(processor.getType(), processor.getTag(), null, message) + ); + } + } + } + ExceptionsHelper.rethrowAndSuppress(exceptions); + } + + public void executeBulkRequest(Iterable> actionRequests, + BiConsumer itemFailureHandler, Consumer completionHandler) { + threadPool.executor(ThreadPool.Names.WRITE).execute(new AbstractRunnable() { + + @Override + public void onFailure(Exception e) { + completionHandler.accept(e); + } + + @Override + protected void doRun() { + for (DocWriteRequest actionRequest : actionRequests) { + IndexRequest indexRequest = null; + if (actionRequest instanceof IndexRequest) { + indexRequest = (IndexRequest) actionRequest; + } else if (actionRequest instanceof UpdateRequest) { + UpdateRequest updateRequest = (UpdateRequest) actionRequest; + indexRequest = updateRequest.docAsUpsert() ? 
updateRequest.doc() : updateRequest.upsertRequest();
+                    }
+                    if (indexRequest == null) {
+                        continue;
+                    }
+                    String pipelineId = indexRequest.getPipeline();
+                    if (NOOP_PIPELINE_NAME.equals(pipelineId) == false) {
+                        try {
+                            Pipeline pipeline = pipelines.get(pipelineId);
+                            if (pipeline == null) {
+                                throw new IllegalArgumentException("pipeline with id [" + pipelineId + "] does not exist");
+                            }
+                            innerExecute(indexRequest, pipeline);
+                            // this shouldn't be needed here but we do it for consistency with index api
+                            // which requires it to prevent double execution
+                            indexRequest.setPipeline(NOOP_PIPELINE_NAME);
+                        } catch (Exception e) {
+                            itemFailureHandler.accept(indexRequest, e);
+                        }
+                    }
+                }
+                completionHandler.accept(null);
+            }
+        });
+    }
+
+    public IngestStats stats() {
+        Map<String, StatsHolder> statsHolderPerPipeline = this.statsHolderPerPipeline;
+
+        Map<String, IngestStats.Stats> statsPerPipeline = new HashMap<>(statsHolderPerPipeline.size());
+        for (Map.Entry<String, StatsHolder> entry : statsHolderPerPipeline.entrySet()) {
+            statsPerPipeline.put(entry.getKey(), entry.getValue().createStats());
+        }
+
+        return new IngestStats(totalStats.createStats(), statsPerPipeline);
+    }
+
+    void updatePipelineStats(IngestMetadata ingestMetadata) {
+        boolean changed = false;
+        Map<String, StatsHolder> newStatsPerPipeline = new HashMap<>(statsHolderPerPipeline);
+        Iterator<String> iterator = newStatsPerPipeline.keySet().iterator();
+        while (iterator.hasNext()) {
+            String pipeline = iterator.next();
+            if (ingestMetadata.getPipelines().containsKey(pipeline) == false) {
+                iterator.remove();
+                changed = true;
+            }
+        }
+        for (String pipeline : ingestMetadata.getPipelines().keySet()) {
+            if (newStatsPerPipeline.containsKey(pipeline) == false) {
+                newStatsPerPipeline.put(pipeline, new StatsHolder());
+                changed = true;
+            }
+        }
+
+        if (changed) {
+            statsHolderPerPipeline = Collections.unmodifiableMap(newStatsPerPipeline);
+        }
+    }
+
+    private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception {
+        if (pipeline.getProcessors().isEmpty()) {
+            return;
+        }
+
+        long startTimeInNanos = System.nanoTime();
+        // the pipeline specific stat holder may not exist and that is fine:
+        // (e.g. the pipeline may have been removed while we're ingesting a document)
+        Optional<StatsHolder> pipelineStats = Optional.ofNullable(statsHolderPerPipeline.get(pipeline.getId()));
+        try {
+            totalStats.preIngest();
+            pipelineStats.ifPresent(StatsHolder::preIngest);
+            String index = indexRequest.index();
+            String type = indexRequest.type();
+            String id = indexRequest.id();
+            String routing = indexRequest.routing();
+            Long version = indexRequest.version();
+            VersionType versionType = indexRequest.versionType();
+            Map<String, Object> sourceAsMap = indexRequest.sourceAsMap();
+            IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, version, versionType, sourceAsMap);
+            pipeline.execute(ingestDocument);
+
+            Map<IngestDocument.MetaData, Object> metadataMap = ingestDocument.extractMetadata();
+            // it's fine to set all metadata fields all the time, as ingest document holds their starting values
+            // before ingestion, which might also get modified during ingestion.
+ indexRequest.index((String) metadataMap.get(IngestDocument.MetaData.INDEX)); + indexRequest.type((String) metadataMap.get(IngestDocument.MetaData.TYPE)); + indexRequest.id((String) metadataMap.get(IngestDocument.MetaData.ID)); + indexRequest.routing((String) metadataMap.get(IngestDocument.MetaData.ROUTING)); + indexRequest.version(((Number) metadataMap.get(IngestDocument.MetaData.VERSION)).longValue()); + if (metadataMap.get(IngestDocument.MetaData.VERSION_TYPE) != null) { + indexRequest.versionType(VersionType.fromString((String) metadataMap.get(IngestDocument.MetaData.VERSION_TYPE))); + } + indexRequest.source(ingestDocument.getSourceAndMetadata()); + } catch (Exception e) { + totalStats.ingestFailed(); + pipelineStats.ifPresent(StatsHolder::ingestFailed); + throw e; + } finally { + long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos); + totalStats.postIngest(ingestTimeInMillis); + pipelineStats.ifPresent(statsHolder -> statsHolder.postIngest(ingestTimeInMillis)); + } + } + + private void innerUpdatePipelines(ClusterState previousState, ClusterState state) { + if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + return; + } + + IngestMetadata ingestMetadata = state.getMetaData().custom(IngestMetadata.TYPE); + IngestMetadata previousIngestMetadata = previousState.getMetaData().custom(IngestMetadata.TYPE); + if (Objects.equals(ingestMetadata, previousIngestMetadata)) { + return; + } + + Map pipelines = new HashMap<>(); + List exceptions = new ArrayList<>(); + for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) { + try { + pipelines.put(pipeline.getId(), Pipeline.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactories)); + } catch (ElasticsearchParseException e) { + pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), e)); + exceptions.add(e); + } catch (Exception e) { + ElasticsearchParseException parseException = new ElasticsearchParseException( + "Error updating pipeline with id [" + pipeline.getId() + "]", e); + pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), parseException)); + exceptions.add(parseException); + } + } + this.pipelines = Collections.unmodifiableMap(pipelines); + ExceptionsHelper.rethrowAndSuppress(exceptions); + } + + private static class StatsHolder { + + private final MeanMetric ingestMetric = new MeanMetric(); + private final CounterMetric ingestCurrent = new CounterMetric(); + private final CounterMetric ingestFailed = new CounterMetric(); + + void preIngest() { + ingestCurrent.inc(); + } + + void postIngest(long ingestTimeInMillis) { + ingestCurrent.dec(); + ingestMetric.inc(ingestTimeInMillis); + } + + void ingestFailed() { + ingestFailed.inc(); + } + + IngestStats.Stats createStats() { + return new IngestStats.Stats(ingestMetric.count(), ingestMetric.sum(), ingestCurrent.count(), ingestFailed.count()); + } + } } diff --git a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java index 1b0553a54902b..37dd3f52cb7d3 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java +++ b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java @@ -51,6 +51,27 @@ public Pipeline(String id, @Nullable String description, @Nullable Integer versi this.version = version; } + public static Pipeline create(String id, Map config, + Map processorFactories) throws Exception { + String description = ConfigurationUtils.readOptionalStringProperty(null, null, 
config, DESCRIPTION_KEY); + Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null); + List> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY); + List processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, processorFactories); + List> onFailureProcessorConfigs = + ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY); + List onFailureProcessors = ConfigurationUtils.readProcessorConfigs(onFailureProcessorConfigs, processorFactories); + if (config.isEmpty() == false) { + throw new ElasticsearchParseException("pipeline [" + id + + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray())); + } + if (onFailureProcessorConfigs != null && onFailureProcessors.isEmpty()) { + throw new ElasticsearchParseException("pipeline [" + id + "] cannot have an empty on_failure option defined"); + } + CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.unmodifiableList(processors), + Collections.unmodifiableList(onFailureProcessors)); + return new Pipeline(id, description, version, compoundProcessor); + } + /** * Modifies the data of a document to be indexed based on the processor this pipeline holds */ @@ -113,27 +134,4 @@ public List flattenAllProcessors() { return compoundProcessor.flattenProcessors(); } - public static final class Factory { - - public Pipeline create(String id, Map config, Map processorFactories) throws Exception { - String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY); - Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null); - List> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY); - List processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, processorFactories); - List> onFailureProcessorConfigs = - ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY); - List onFailureProcessors = ConfigurationUtils.readProcessorConfigs(onFailureProcessorConfigs, processorFactories); - if (config.isEmpty() == false) { - throw new ElasticsearchParseException("pipeline [" + id + - "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray())); - } - if (onFailureProcessorConfigs != null && onFailureProcessors.isEmpty()) { - throw new ElasticsearchParseException("pipeline [" + id + "] cannot have an empty on_failure option defined"); - } - CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.unmodifiableList(processors), - Collections.unmodifiableList(onFailureProcessors)); - return new Pipeline(id, description, version, compoundProcessor); - } - - } } diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java index a2aa8e385e3f9..6778f3d1eaa6a 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java @@ -19,7 +19,6 @@ package org.elasticsearch.ingest; -import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; import org.elasticsearch.common.ParseField; @@ -117,13 +116,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } public static PipelineConfiguration readFrom(StreamInput in) throws 
IOException { - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - return new PipelineConfiguration(in.readString(), in.readBytesReference(), in.readEnum(XContentType.class)); - } else { - final String id = in.readString(); - final BytesReference config = in.readBytesReference(); - return new PipelineConfiguration(id, config, XContentHelper.xContentType(config)); - } + return new PipelineConfiguration(in.readString(), in.readBytesReference(), in.readEnum(XContentType.class)); } public static Diff readDiffFrom(StreamInput in) throws IOException { @@ -134,9 +127,7 @@ public static Diff readDiffFrom(StreamInput in) throws IO public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeBytesReference(config); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeEnum(xContentType); - } + out.writeEnum(xContentType); } @Override diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java deleted file mode 100644 index 56d44ee888122..0000000000000 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.ingest; - -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterStateApplier; -import org.elasticsearch.common.metrics.CounterMetric; -import org.elasticsearch.common.metrics.MeanMetric; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.index.VersionType; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import java.util.function.BiConsumer; -import java.util.function.Consumer; - -public class PipelineExecutionService implements ClusterStateApplier { - - private final PipelineStore store; - private final ThreadPool threadPool; - - private final StatsHolder totalStats = new StatsHolder(); - private volatile Map statsHolderPerPipeline = Collections.emptyMap(); - - public PipelineExecutionService(PipelineStore store, ThreadPool threadPool) { - this.store = store; - this.threadPool = threadPool; - } - - public void executeBulkRequest(Iterable> actionRequests, - BiConsumer itemFailureHandler, - Consumer completionHandler) { - threadPool.executor(ThreadPool.Names.WRITE).execute(new AbstractRunnable() { - - @Override - public void onFailure(Exception e) { - completionHandler.accept(e); - } - - @Override - protected void doRun() throws Exception { - for (DocWriteRequest actionRequest : actionRequests) { - IndexRequest indexRequest = null; - if (actionRequest instanceof IndexRequest) { - indexRequest = (IndexRequest) actionRequest; - } else if (actionRequest instanceof UpdateRequest) { - UpdateRequest updateRequest = (UpdateRequest) actionRequest; - indexRequest = updateRequest.docAsUpsert() ? 
updateRequest.doc() : updateRequest.upsertRequest(); - } - if (indexRequest == null) { - continue; - } - String pipeline = indexRequest.getPipeline(); - if (IngestService.NOOP_PIPELINE_NAME.equals(pipeline) == false) { - try { - innerExecute(indexRequest, getPipeline(indexRequest.getPipeline())); - //this shouldn't be needed here but we do it for consistency with index api - // which requires it to prevent double execution - indexRequest.setPipeline(IngestService.NOOP_PIPELINE_NAME); - } catch (Exception e) { - itemFailureHandler.accept(indexRequest, e); - } - } - } - completionHandler.accept(null); - } - }); - } - - public IngestStats stats() { - Map statsHolderPerPipeline = this.statsHolderPerPipeline; - - Map statsPerPipeline = new HashMap<>(statsHolderPerPipeline.size()); - for (Map.Entry entry : statsHolderPerPipeline.entrySet()) { - statsPerPipeline.put(entry.getKey(), entry.getValue().createStats()); - } - - return new IngestStats(totalStats.createStats(), statsPerPipeline); - } - - @Override - public void applyClusterState(ClusterChangedEvent event) { - IngestMetadata ingestMetadata = event.state().getMetaData().custom(IngestMetadata.TYPE); - if (ingestMetadata != null) { - updatePipelineStats(ingestMetadata); - } - } - - void updatePipelineStats(IngestMetadata ingestMetadata) { - boolean changed = false; - Map newStatsPerPipeline = new HashMap<>(statsHolderPerPipeline); - Iterator iterator = newStatsPerPipeline.keySet().iterator(); - while (iterator.hasNext()) { - String pipeline = iterator.next(); - if (ingestMetadata.getPipelines().containsKey(pipeline) == false) { - iterator.remove(); - changed = true; - } - } - for (String pipeline : ingestMetadata.getPipelines().keySet()) { - if (newStatsPerPipeline.containsKey(pipeline) == false) { - newStatsPerPipeline.put(pipeline, new StatsHolder()); - changed = true; - } - } - - if (changed) { - statsHolderPerPipeline = Collections.unmodifiableMap(newStatsPerPipeline); - } - } - - private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception { - if (pipeline.getProcessors().isEmpty()) { - return; - } - - long startTimeInNanos = System.nanoTime(); - // the pipeline specific stat holder may not exist and that is fine: - // (e.g. the pipeline may have been removed while we're ingesting a document - Optional pipelineStats = Optional.ofNullable(statsHolderPerPipeline.get(pipeline.getId())); - try { - totalStats.preIngest(); - pipelineStats.ifPresent(StatsHolder::preIngest); - String index = indexRequest.index(); - String type = indexRequest.type(); - String id = indexRequest.id(); - String routing = indexRequest.routing(); - Long version = indexRequest.version(); - VersionType versionType = indexRequest.versionType(); - Map sourceAsMap = indexRequest.sourceAsMap(); - IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, version, versionType, sourceAsMap); - pipeline.execute(ingestDocument); - - Map metadataMap = ingestDocument.extractMetadata(); - //it's fine to set all metadata fields all the time, as ingest document holds their starting values - //before ingestion, which might also get modified during ingestion. 
- indexRequest.index((String) metadataMap.get(IngestDocument.MetaData.INDEX)); - indexRequest.type((String) metadataMap.get(IngestDocument.MetaData.TYPE)); - indexRequest.id((String) metadataMap.get(IngestDocument.MetaData.ID)); - indexRequest.routing((String) metadataMap.get(IngestDocument.MetaData.ROUTING)); - indexRequest.version(((Number) metadataMap.get(IngestDocument.MetaData.VERSION)).longValue()); - if (metadataMap.get(IngestDocument.MetaData.VERSION_TYPE) != null) { - indexRequest.versionType(VersionType.fromString((String) metadataMap.get(IngestDocument.MetaData.VERSION_TYPE))); - } - indexRequest.source(ingestDocument.getSourceAndMetadata()); - } catch (Exception e) { - totalStats.ingestFailed(); - pipelineStats.ifPresent(StatsHolder::ingestFailed); - throw e; - } finally { - long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos); - totalStats.postIngest(ingestTimeInMillis); - pipelineStats.ifPresent(statsHolder -> statsHolder.postIngest(ingestTimeInMillis)); - } - } - - private Pipeline getPipeline(String pipelineId) { - Pipeline pipeline = store.get(pipelineId); - if (pipeline == null) { - throw new IllegalArgumentException("pipeline with id [" + pipelineId + "] does not exist"); - } - return pipeline; - } - - static class StatsHolder { - - private final MeanMetric ingestMetric = new MeanMetric(); - private final CounterMetric ingestCurrent = new CounterMetric(); - private final CounterMetric ingestFailed = new CounterMetric(); - - void preIngest() { - ingestCurrent.inc(); - } - - void postIngest(long ingestTimeInMillis) { - ingestCurrent.dec(); - ingestMetric.inc(ingestTimeInMillis); - } - - void ingestFailed() { - ingestFailed.inc(); - } - - IngestStats.Stats createStats() { - return new IngestStats.Stats(ingestMetric.count(), ingestMetric.sum(), ingestCurrent.count(), ingestFailed.count()); - } - - } - -} diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java b/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java deleted file mode 100644 index 9fceaf1a9a573..0000000000000 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.ingest; - -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateApplier; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.gateway.GatewayService; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; - -public class PipelineStore extends AbstractComponent implements ClusterStateApplier { - - private final Pipeline.Factory factory = new Pipeline.Factory(); - private final Map processorFactories; - - // Ideally this should be in IngestMetadata class, but we don't have the processor factories around there. - // We know of all the processor factories when a node with all its plugin have been initialized. Also some - // processor factories rely on other node services. Custom metadata is statically registered when classes - // are loaded, so in the cluster state we just save the pipeline config and here we keep the actual pipelines around. 
- volatile Map pipelines = new HashMap<>(); - - public PipelineStore(Settings settings, Map processorFactories) { - super(settings); - this.processorFactories = processorFactories; - } - - @Override - public void applyClusterState(ClusterChangedEvent event) { - innerUpdatePipelines(event.previousState(), event.state()); - } - - void innerUpdatePipelines(ClusterState previousState, ClusterState state) { - if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - return; - } - - IngestMetadata ingestMetadata = state.getMetaData().custom(IngestMetadata.TYPE); - IngestMetadata previousIngestMetadata = previousState.getMetaData().custom(IngestMetadata.TYPE); - if (Objects.equals(ingestMetadata, previousIngestMetadata)) { - return; - } - - Map pipelines = new HashMap<>(); - List exceptions = new ArrayList<>(); - for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) { - try { - pipelines.put(pipeline.getId(), factory.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactories)); - } catch (ElasticsearchParseException e) { - pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), e)); - exceptions.add(e); - } catch (Exception e) { - ElasticsearchParseException parseException = new ElasticsearchParseException( - "Error updating pipeline with id [" + pipeline.getId() + "]", e); - pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), parseException)); - exceptions.add(parseException); - } - } - this.pipelines = Collections.unmodifiableMap(pipelines); - ExceptionsHelper.rethrowAndSuppress(exceptions); - } - - private Pipeline substitutePipeline(String id, ElasticsearchParseException e) { - String tag = e.getHeaderKeys().contains("processor_tag") ? e.getHeader("processor_tag").get(0) : null; - String type = e.getHeaderKeys().contains("processor_type") ? e.getHeader("processor_type").get(0) : "unknown"; - String errorMessage = "pipeline with id [" + id + "] could not be loaded, caused by [" + e.getDetailedMessage() + "]"; - Processor failureProcessor = new AbstractProcessor(tag) { - @Override - public void execute(IngestDocument ingestDocument) { - throw new IllegalStateException(errorMessage); - } - - @Override - public String getType() { - return type; - } - }; - String description = "this is a place holder pipeline, because pipeline with id [" + id + "] could not be loaded"; - return new Pipeline(id, description, null, new CompoundProcessor(failureProcessor)); - } - - /** - * Deletes the pipeline specified by id in the request. 
- */ - public void delete(ClusterService clusterService, DeletePipelineRequest request, ActionListener listener) { - clusterService.submitStateUpdateTask("delete-pipeline-" + request.getId(), - new AckedClusterStateUpdateTask(request, listener) { - - @Override - protected AcknowledgedResponse newResponse(boolean acknowledged) { - return new AcknowledgedResponse(acknowledged); - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - return innerDelete(request, currentState); - } - }); - } - - ClusterState innerDelete(DeletePipelineRequest request, ClusterState currentState) { - IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); - if (currentIngestMetadata == null) { - return currentState; - } - Map pipelines = currentIngestMetadata.getPipelines(); - Set toRemove = new HashSet<>(); - for (String pipelineKey : pipelines.keySet()) { - if (Regex.simpleMatch(request.getId(), pipelineKey)) { - toRemove.add(pipelineKey); - } - } - if (toRemove.isEmpty() && Regex.isMatchAllPattern(request.getId()) == false) { - throw new ResourceNotFoundException("pipeline [{}] is missing", request.getId()); - } else if (toRemove.isEmpty()) { - return currentState; - } - final Map pipelinesCopy = new HashMap<>(pipelines); - for (String key : toRemove) { - pipelinesCopy.remove(key); - } - ClusterState.Builder newState = ClusterState.builder(currentState); - newState.metaData(MetaData.builder(currentState.getMetaData()) - .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelinesCopy)) - .build()); - return newState.build(); - } - - /** - * Stores the specified pipeline definition in the request. - */ - public void put(ClusterService clusterService, Map ingestInfos, PutPipelineRequest request, - ActionListener listener) throws Exception { - // validates the pipeline and processor configuration before submitting a cluster update task: - validatePipeline(ingestInfos, request); - clusterService.submitStateUpdateTask("put-pipeline-" + request.getId(), - new AckedClusterStateUpdateTask(request, listener) { - - @Override - protected AcknowledgedResponse newResponse(boolean acknowledged) { - return new AcknowledgedResponse(acknowledged); - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - return innerPut(request, currentState); - } - }); - } - - void validatePipeline(Map ingestInfos, PutPipelineRequest request) throws Exception { - if (ingestInfos.isEmpty()) { - throw new IllegalStateException("Ingest info is empty"); - } - - Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); - Pipeline pipeline = factory.create(request.getId(), pipelineConfig, processorFactories); - List exceptions = new ArrayList<>(); - for (Processor processor : pipeline.flattenAllProcessors()) { - for (Map.Entry entry : ingestInfos.entrySet()) { - if (entry.getValue().containsProcessor(processor.getType()) == false) { - String message = "Processor type [" + processor.getType() + "] is not installed on node [" + entry.getKey() + "]"; - exceptions.add(ConfigurationUtils.newConfigurationException(processor.getType(), processor.getTag(), null, message)); - } - } - } - ExceptionsHelper.rethrowAndSuppress(exceptions); - } - - ClusterState innerPut(PutPipelineRequest request, ClusterState currentState) { - IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); - Map pipelines; - if (currentIngestMetadata != null) { - pipelines = new 
HashMap<>(currentIngestMetadata.getPipelines()); - } else { - pipelines = new HashMap<>(); - } - - pipelines.put(request.getId(), new PipelineConfiguration(request.getId(), request.getSource(), request.getXContentType())); - ClusterState.Builder newState = ClusterState.builder(currentState); - newState.metaData(MetaData.builder(currentState.getMetaData()) - .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelines)) - .build()); - return newState.build(); - } - - /** - * Returns the pipeline by the specified id - */ - public Pipeline get(String id) { - return pipelines.get(id); - } - - public Map getProcessorFactories() { - return processorFactories; - } - - /** - * @return pipeline configuration specified by id. If multiple ids or wildcards are specified multiple pipelines - * may be returned - */ - // Returning PipelineConfiguration instead of Pipeline, because Pipeline and Processor interface don't - // know how to serialize themselves. - public List getPipelines(ClusterState clusterState, String... ids) { - IngestMetadata ingestMetadata = clusterState.getMetaData().custom(IngestMetadata.TYPE); - return innerGetPipelines(ingestMetadata, ids); - } - - List innerGetPipelines(IngestMetadata ingestMetadata, String... ids) { - if (ingestMetadata == null) { - return Collections.emptyList(); - } - - // if we didn't ask for _any_ ID, then we get them all (this is the same as if they ask for '*') - if (ids.length == 0) { - return new ArrayList<>(ingestMetadata.getPipelines().values()); - } - - List result = new ArrayList<>(ids.length); - for (String id : ids) { - if (Regex.isSimpleMatchPattern(id)) { - for (Map.Entry entry : ingestMetadata.getPipelines().entrySet()) { - if (Regex.simpleMatch(id, entry.getKey())) { - result.add(entry.getValue()); - } - } - } else { - PipelineConfiguration pipeline = ingestMetadata.getPipelines().get(id); - if (pipeline != null) { - result.add(pipeline); - } - } - } - return result; - } -} diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java index 637f4cf1cbe00..3bdfe95f1e2c6 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java @@ -52,11 +52,7 @@ public OsStats(StreamInput in) throws IOException { this.cpu = new Cpu(in); this.mem = new Mem(in); this.swap = new Swap(in); - if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - this.cgroup = in.readOptionalWriteable(Cgroup::new); - } else { - this.cgroup = null; - } + this.cgroup = in.readOptionalWriteable(Cgroup::new); } @Override @@ -65,9 +61,7 @@ public void writeTo(StreamOutput out) throws IOException { cpu.writeTo(out); mem.writeTo(out); swap.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - out.writeOptionalWriteable(cgroup); - } + out.writeOptionalWriteable(cgroup); } public long getTimestamp() { diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 274157b427ad6..93052841a3208 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -352,7 +352,7 @@ protected Node(final Environment environment, Collection final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool); clusterService.addStateApplier(scriptModule.getScriptService()); resourcesToClose.add(clusterService); - final IngestService ingestService = new 
IngestService(settings, threadPool, this.environment, + final IngestService ingestService = new IngestService(clusterService, threadPool, this.environment, scriptModule.getScriptService(), analysisModule.getAnalysisRegistry(), pluginsService.filterPlugins(IngestPlugin.class)); final DiskThresholdMonitor listener = new DiskThresholdMonitor(settings, clusterService::state, clusterService.getClusterSettings(), client); diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 0e19b5a650221..207886c5cf263 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -37,7 +37,6 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.monitor.MonitorService; -import org.elasticsearch.node.ResponseCollectorService; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; @@ -83,8 +82,7 @@ public class NodeService extends AbstractComponent implements Closeable { this.scriptService = scriptService; this.responseCollectorService = responseCollectorService; this.searchTransportService = searchTransportService; - clusterService.addStateApplier(ingestService.getPipelineStore()); - clusterService.addStateApplier(ingestService.getPipelineExecutionService()); + clusterService.addStateApplier(ingestService); } public NodeInfo info(boolean settings, boolean os, boolean process, boolean jvm, boolean threadPool, @@ -120,7 +118,7 @@ public NodeStats stats(CommonStatsFlags indices, boolean os, boolean process, bo circuitBreaker ? circuitBreakerService.stats() : null, script ? scriptService.stats() : null, discoveryStats ? discovery.stats() : null, - ingest ? ingestService.getPipelineExecutionService().stats() : null, + ingest ? ingestService.stats() : null, adaptiveSelection ? 
responseCollectorService.getAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null ); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java index f81b7c770e56c..b7a179e41e381 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java @@ -188,7 +188,7 @@ public long getNumberOfTasksOnNode(String nodeId, String taskName) { @Override public Version getMinimalSupportedVersion() { - return Version.V_5_4_0; + return Version.CURRENT.minimumCompatibilityVersion(); } @Override diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java b/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java index 74a911b0ae4fc..d211efef5173e 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginInfo.java @@ -107,11 +107,7 @@ public PluginInfo(final StreamInput in) throws IOException { } else { extendedPlugins = Collections.emptyList(); } - if (in.getVersion().onOrAfter(Version.V_5_4_0)) { - hasNativeController = in.readBoolean(); - } else { - hasNativeController = false; - } + hasNativeController = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_6_0_0_beta2) && in.getVersion().before(Version.V_6_3_0)) { /* * Elasticsearch versions in [6.0.0-beta2, 6.3.0) allowed plugins to specify that they require the keystore and this was @@ -134,9 +130,7 @@ public void writeTo(final StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_6_2_0)) { out.writeStringList(extendedPlugins); } - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeBoolean(hasNativeController); - } + out.writeBoolean(hasNativeController); if (out.getVersion().onOrAfter(Version.V_6_0_0_beta2) && out.getVersion().before(Version.V_6_3_0)) { /* * Elasticsearch versions in [6.0.0-beta2, 6.3.0) allowed plugins to specify that they require the keystore and this was diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index e113240bee3dc..e66fad272c94e 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -39,7 +39,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; -import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; @@ -719,7 +718,7 @@ public boolean isReadOnly() { protected void writeIndexGen(final RepositoryData repositoryData, final long repositoryStateId) throws IOException { assert isReadOnly() == false; // can not write to a read only repository final long currentGen = latestIndexBlobId(); - if (repositoryStateId != SnapshotsInProgress.UNDEFINED_REPOSITORY_STATE_ID && currentGen != repositoryStateId) { + if (currentGen != repositoryStateId) { // the index file was updated by a concurrent operation, so we were operating on stale // repository data throw new RepositoryException(metadata.name(), "concurrent 
modification of the index-N file, expected current generation [" + diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 2a60262b32f57..6239015dae418 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -86,6 +86,14 @@ public static MultiSearchRequest parseRequest(RestRequest restRequest, boolean a int preFilterShardSize = restRequest.paramAsInt("pre_filter_shard_size", SearchRequest.DEFAULT_PRE_FILTER_SHARD_SIZE); + final Integer maxConcurrentShardRequests; + if (restRequest.hasParam("max_concurrent_shard_requests")) { + // only set if we have the parameter since we auto adjust the max concurrency on the coordinator + // based on the number of nodes in the cluster + maxConcurrentShardRequests = restRequest.paramAsInt("max_concurrent_shard_requests", Integer.MIN_VALUE); + } else { + maxConcurrentShardRequests = null; + } parseMultiLineRequest(restRequest, multiRequest.indicesOptions(), allowExplicitIndex, (searchRequest, parser) -> { searchRequest.source(SearchSourceBuilder.fromXContent(parser, false)); @@ -96,6 +104,9 @@ public static MultiSearchRequest parseRequest(RestRequest restRequest, boolean a for (SearchRequest request : requests) { // preserve if it's set on the request request.setPreFilterShardSize(Math.min(preFilterShardSize, request.getPreFilterShardSize())); + if (maxConcurrentShardRequests != null) { + request.setMaxConcurrentShardRequests(maxConcurrentShardRequests); + } } return multiRequest; } diff --git a/server/src/main/java/org/elasticsearch/script/Script.java b/server/src/main/java/org/elasticsearch/script/Script.java index a64a3ecd37640..67ea4f24b83f8 100644 --- a/server/src/main/java/org/elasticsearch/script/Script.java +++ b/server/src/main/java/org/elasticsearch/script/Script.java @@ -19,7 +19,6 @@ package org.elasticsearch.script; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -451,133 +450,24 @@ public Script(ScriptType type, String lang, String idOrCode, Map * Creates a {@link Script} read from an input stream. */ public Script(StreamInput in) throws IOException { - // Version 5.3 allows lang to be an optional parameter for stored scripts and expects - // options to be null for stored and file scripts. - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - this.type = ScriptType.readFrom(in); - this.lang = in.readOptionalString(); - this.idOrCode = in.readString(); - @SuppressWarnings("unchecked") - Map options = (Map)(Map)in.readMap(); - this.options = options; - this.params = in.readMap(); - // Version 5.1 to 5.3 (exclusive) requires all Script members to be non-null and supports the potential - // for more options than just XContentType. Reorders the read in contents to be in - // same order as the constructor. - } else if (in.getVersion().onOrAfter(Version.V_5_1_1)) { - this.type = ScriptType.readFrom(in); - String lang = in.readString(); - this.lang = this.type == ScriptType.STORED ? 
null : lang; - - this.idOrCode = in.readString(); - @SuppressWarnings("unchecked") - Map options = (Map)(Map)in.readMap(); - - if (this.type != ScriptType.INLINE && options.isEmpty()) { - this.options = null; - } else { - this.options = options; - } - - this.params = in.readMap(); - // Prior to version 5.1 the script members are read in certain cases as optional and given - // default values when necessary. Also the only option supported is for XContentType. - } else { - this.idOrCode = in.readString(); - - if (in.readBoolean()) { - this.type = ScriptType.readFrom(in); - } else { - this.type = DEFAULT_SCRIPT_TYPE; - } - - String lang = in.readOptionalString(); - - if (lang == null) { - this.lang = this.type == ScriptType.STORED ? null : DEFAULT_SCRIPT_LANG; - } else { - this.lang = lang; - } - - Map params = in.readMap(); - - if (params == null) { - this.params = new HashMap<>(); - } else { - this.params = params; - } - - if (in.readBoolean()) { - this.options = new HashMap<>(); - XContentType contentType = in.readEnum(XContentType.class); - this.options.put(CONTENT_TYPE_OPTION, contentType.mediaType()); - } else if (type == ScriptType.INLINE) { - options = new HashMap<>(); - } else { - this.options = null; - } - } + this.type = ScriptType.readFrom(in); + this.lang = in.readOptionalString(); + this.idOrCode = in.readString(); + @SuppressWarnings("unchecked") + Map options = (Map)(Map)in.readMap(); + this.options = options; + this.params = in.readMap(); } @Override public void writeTo(StreamOutput out) throws IOException { - // Version 5.3+ allows lang to be an optional parameter for stored scripts and expects - // options to be null for stored and file scripts. - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - type.writeTo(out); - out.writeOptionalString(lang); - out.writeString(idOrCode); - @SuppressWarnings("unchecked") - Map options = (Map)(Map)this.options; - out.writeMap(options); - out.writeMap(params); - // Version 5.1 to 5.3 (exclusive) requires all Script members to be non-null and supports the potential - // for more options than just XContentType. Reorders the written out contents to be in - // same order as the constructor. - } else if (out.getVersion().onOrAfter(Version.V_5_1_1)) { - type.writeTo(out); - - if (lang == null) { - out.writeString(""); - } else { - out.writeString(lang); - } - - out.writeString(idOrCode); - @SuppressWarnings("unchecked") - Map options = (Map)(Map)this.options; - - if (options == null) { - out.writeMap(new HashMap<>()); - } else { - out.writeMap(options); - } - - out.writeMap(params); - // Prior to version 5.1 the Script members were possibly written as optional or null, though there is no case where a null - // value wasn't equivalent to it's default value when actually compiling/executing a script. Meaning, there are no - // backwards compatibility issues, and now there's enforced consistency. Also the only supported compiler - // option was XContentType. 
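A minimal sketch, using plain java.io streams rather than Elasticsearch's StreamInput/StreamOutput, of the version-gating idiom these Script hunks delete: once every supported peer speaks the 5.3.0+ format, the branches collapse into the unconditional read and write that remain in the patch. The version constant and field name below are stand-ins, not the real wire protocol.

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class ScriptWireSketch {
        static final int V_5_3_0 = 5_030_099; // stand-in numeric version id

        // Old style: pick a wire format based on the remote node's version.
        static void writeLang(DataOutputStream out, int remoteVersion, String lang) throws IOException {
            if (remoteVersion >= V_5_3_0) {
                out.writeBoolean(lang != null);         // optional string, new format
                if (lang != null) {
                    out.writeUTF(lang);
                }
            } else {
                out.writeUTF(lang == null ? "" : lang); // old format required non-null
            }
        }

        // After the cut: only the new format survives, so no version check is needed.
        static String readLang(DataInputStream in) throws IOException {
            return in.readBoolean() ? in.readUTF() : null;
        }
    }
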
- } else { - out.writeString(idOrCode); - out.writeBoolean(true); - type.writeTo(out); - out.writeOptionalString(lang); - - if (params.isEmpty()) { - out.writeMap(null); - } else { - out.writeMap(params); - } - - if (options != null && options.containsKey(CONTENT_TYPE_OPTION)) { - XContentType contentType = XContentType.fromMediaTypeOrFormat(options.get(CONTENT_TYPE_OPTION)); - out.writeBoolean(true); - out.writeEnum(contentType); - } else { - out.writeBoolean(false); - } - } + type.writeTo(out); + out.writeOptionalString(lang); + out.writeString(idOrCode); + @SuppressWarnings("unchecked") + Map options = (Map) (Map) this.options; + out.writeMap(options); + out.writeMap(params); } /** diff --git a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java index 59d824eb313e0..35a7c2e60d685 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java @@ -292,25 +292,7 @@ public ScriptMetaData(StreamInput in) throws IOException { for (int i = 0; i < size; i++) { String id = in.readString(); - - // Prior to version 5.3 all scripts were stored using the deprecated namespace. - // Split the id to find the language then use StoredScriptSource to parse the - // expected BytesReference after which a new StoredScriptSource is created - // with the appropriate language and options. - if (in.getVersion().before(Version.V_5_3_0)) { - int split = id.indexOf('#'); - - if (split == -1) { - throw new IllegalArgumentException("illegal stored script id [" + id + "], does not contain lang"); - } else { - source = new StoredScriptSource(in); - source = new StoredScriptSource(id.substring(0, split), source.getSource(), Collections.emptyMap()); - } - // Version 5.3+ can just be parsed normally using StoredScriptSource. - } else { - source = new StoredScriptSource(in); - } - + source = new StoredScriptSource(in); scripts.put(id, source); } @@ -319,34 +301,11 @@ public ScriptMetaData(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - // Version 5.3+ will output the contents of the scripts' Map using - // StoredScriptSource to stored the language, code, and options. - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeVInt(scripts.size()); - - for (Map.Entry entry : scripts.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } - // Prior to Version 5.3, stored scripts can only be read using the deprecated - // namespace. Scripts using the deprecated namespace are first isolated in a - // temporary Map, then written out. Since all scripts will be stored using the - // deprecated namespace, no scripts will be lost. 
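The writeTo branch removed in this hunk filtered stored scripts down to the deprecated "lang#id" namespace before writing to old nodes. A small sketch, with byte[] standing in for StoredScriptSource, of the single path that remains: every entry is written, size-prefixed, with no key filtering.

    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Map;

    final class ScriptMapWireSketch {
        static void writeScripts(DataOutputStream out, Map<String, byte[]> scripts) throws IOException {
            out.writeInt(scripts.size());          // the real protocol uses a vInt
            for (Map.Entry<String, byte[]> entry : scripts.entrySet()) {
                out.writeUTF(entry.getKey());      // stored script id
                out.writeInt(entry.getValue().length);
                out.write(entry.getValue());       // StoredScriptSource stand-in
            }
        }
    }
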
- } else { - Map filtered = new HashMap<>(); - - for (Map.Entry entry : scripts.entrySet()) { - if (entry.getKey().contains("#")) { - filtered.put(entry.getKey(), entry.getValue()); - } - } - - out.writeVInt(filtered.size()); + out.writeVInt(scripts.size()); - for (Map.Entry entry : filtered.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } + for (Map.Entry entry : scripts.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 4bf5e03b8a7cc..a7db2c55fe149 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -98,6 +98,8 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -789,14 +791,21 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc context.fetchSourceContext(source.fetchSource()); } if (source.docValueFields() != null) { + List docValueFields = new ArrayList<>(); + for (DocValueFieldsContext.FieldAndFormat format : source.docValueFields()) { + Collection fieldNames = context.mapperService().simpleMatchToFullName(format.field); + for (String fieldName: fieldNames) { + docValueFields.add(new DocValueFieldsContext.FieldAndFormat(fieldName, format.format)); + } + } int maxAllowedDocvalueFields = context.mapperService().getIndexSettings().getMaxDocvalueFields(); - if (source.docValueFields().size() > maxAllowedDocvalueFields) { + if (docValueFields.size() > maxAllowedDocvalueFields) { throw new IllegalArgumentException( - "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [" + maxAllowedDocvalueFields - + "] but was [" + source.docValueFields().size() + "]. This limit can be set by changing the [" - + IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey() + "] index level setting."); + "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [" + maxAllowedDocvalueFields + + "] but was [" + docValueFields.size() + "]. 
This limit can be set by changing the [" + + IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey() + "] index level setting."); } - context.docValueFieldsContext(new DocValueFieldsContext(source.docValueFields())); + context.docValueFieldsContext(new DocValueFieldsContext(docValueFields)); } if (source.highlighter() != null) { HighlightBuilder highlightBuilder = source.highlighter(); diff --git a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java index 19c0f8c64d58b..4a46c7202d14e 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java +++ b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -19,7 +19,6 @@ package org.elasticsearch.search; -import org.elasticsearch.Version; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; @@ -52,11 +51,7 @@ public SearchShardTarget(StreamInput in) throws IOException { } shardId = ShardId.readShardId(in); this.originalIndices = null; - if (in.getVersion().onOrAfter(Version.V_5_6_0)) { - clusterAlias = in.readOptionalString(); - } else { - clusterAlias = null; - } + clusterAlias = in.readOptionalString(); } public SearchShardTarget(String nodeId, ShardId shardId, String clusterAlias, OriginalIndices originalIndices) { @@ -121,9 +116,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeText(nodeId); } shardId.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_5_6_0)) { - out.writeOptionalString(clusterAlias); - } + out.writeOptionalString(clusterAlias); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java index 9e3012c5eb9d6..8154108f9f0bc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java @@ -36,7 +36,6 @@ import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -424,13 +423,8 @@ public IncludeExclude(StreamInput in) throws IOException { } else { excludeValues = null; } - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - incNumPartitions = in.readVInt(); - incZeroBasedPartition = in.readVInt(); - } else { - incNumPartitions = 0; - incZeroBasedPartition = 0; - } + incNumPartitions = in.readVInt(); + incZeroBasedPartition = in.readVInt(); } @Override @@ -457,10 +451,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBytesRef(value); } } - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeVInt(incNumPartitions); - out.writeVInt(incZeroBasedPartition); - } + out.writeVInt(incNumPartitions); + out.writeVInt(incZeroBasedPartition); } } diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index c42a1a12a1877..c7564dc5ea835 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -248,9 +248,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException { profile = in.readBoolean(); searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new); sliceBuilder = in.readOptionalWriteable(SliceBuilder::new); - if (in.getVersion().onOrAfter(Version.V_5_3_0)) { - collapse = in.readOptionalWriteable(CollapseBuilder::new); - } + collapse = in.readOptionalWriteable(CollapseBuilder::new); if (in.getVersion().onOrAfter(Version.V_6_0_0_beta1)) { trackTotalHits = in.readBoolean(); } else { @@ -313,9 +311,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(profile); out.writeOptionalWriteable(searchAfterBuilder); out.writeOptionalWriteable(sliceBuilder); - if (out.getVersion().onOrAfter(Version.V_5_3_0)) { - out.writeOptionalWriteable(collapse); - } + out.writeOptionalWriteable(collapse); if (out.getVersion().onOrAfter(Version.V_6_0_0_beta1)) { out.writeBoolean(trackTotalHits); } diff --git a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java index ccab5e2cb93b3..2ebf413b1405d 100644 --- a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.collapse; import org.apache.lucene.index.IndexOptions; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -94,31 +93,14 @@ public CollapseBuilder(String field) { public CollapseBuilder(StreamInput in) throws IOException { this.field = in.readString(); this.maxConcurrentGroupRequests = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - this.innerHits = in.readList(InnerHitBuilder::new); - } else { - InnerHitBuilder innerHitBuilder = in.readOptionalWriteable(InnerHitBuilder::new); - if (innerHitBuilder != null) { - this.innerHits = Collections.singletonList(innerHitBuilder); - } else { - this.innerHits = Collections.emptyList(); - } - } + this.innerHits = in.readList(InnerHitBuilder::new); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(field); out.writeVInt(maxConcurrentGroupRequests); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeList(innerHits); - } else { - boolean hasInnerHit = innerHits.isEmpty() == false; - out.writeBoolean(hasInnerHit); - if (hasInnerHit) { - innerHits.get(0).writeToCollapseBWC(out); - } - } + out.writeList(innerHits); } public static CollapseBuilder fromXContent(XContentParser parser) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java index 7888f6cd5a098..161ca9279f094 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java @@ -21,7 +21,6 @@ import org.apache.lucene.search.highlight.SimpleFragmenter; import org.apache.lucene.search.highlight.SimpleSpanFragmenter; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import 
org.elasticsearch.common.Strings; @@ -152,17 +151,13 @@ protected AbstractHighlighterBuilder(StreamInput in) throws IOException { order(in.readOptionalWriteable(Order::readFromStream)); highlightFilter(in.readOptionalBoolean()); forceSource(in.readOptionalBoolean()); - if (in.getVersion().onOrAfter(Version.V_5_4_0)) { - boundaryScannerType(in.readOptionalWriteable(BoundaryScannerType::readFromStream)); - } + boundaryScannerType(in.readOptionalWriteable(BoundaryScannerType::readFromStream)); boundaryMaxScan(in.readOptionalVInt()); if (in.readBoolean()) { boundaryChars(in.readString().toCharArray()); } - if (in.getVersion().onOrAfter(Version.V_5_4_0)) { - if (in.readBoolean()) { - boundaryScannerLocale(in.readString()); - } + if (in.readBoolean()) { + boundaryScannerLocale(in.readString()); } noMatchSize(in.readOptionalVInt()); phraseLimit(in.readOptionalVInt()); @@ -191,21 +186,17 @@ public final void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(order); out.writeOptionalBoolean(highlightFilter); out.writeOptionalBoolean(forceSource); - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeOptionalWriteable(boundaryScannerType); - } + out.writeOptionalWriteable(boundaryScannerType); out.writeOptionalVInt(boundaryMaxScan); boolean hasBounaryChars = boundaryChars != null; out.writeBoolean(hasBounaryChars); if (hasBounaryChars) { out.writeString(String.valueOf(boundaryChars)); } - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - boolean hasBoundaryScannerLocale = boundaryScannerLocale != null; - out.writeBoolean(hasBoundaryScannerLocale); - if (hasBoundaryScannerLocale) { - out.writeString(boundaryScannerLocale.toLanguageTag()); - } + boolean hasBoundaryScannerLocale = boundaryScannerLocale != null; + out.writeBoolean(hasBoundaryScannerLocale); + if (hasBoundaryScannerLocale) { + out.writeString(boundaryScannerLocale.toLanguageTag()); } out.writeOptionalVInt(noMatchSize); out.writeOptionalVInt(phraseLimit); diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index cf656ed3b9cb2..72a12b805eb17 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -35,7 +35,6 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import java.io.IOException; -import java.util.Optional; /** * Shard level search request that gets created and consumed on the local node. @@ -213,25 +212,10 @@ protected void innerReadFrom(StreamInput in) throws IOException { source = in.readOptionalWriteable(SearchSourceBuilder::new); types = in.readStringArray(); aliasFilter = new AliasFilter(in); - if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - indexBoost = in.readFloat(); - } else { - // Nodes < 5.2.0 doesn't send index boost. Read it from source. - if (source != null) { - Optional boost = source.indexBoosts() - .stream() - .filter(ib -> ib.getIndex().equals(shardId.getIndexName())) - .findFirst(); - indexBoost = boost.isPresent() ? 
boost.get().getBoost() : 1.0f; - } else { - indexBoost = 1.0f; - } - } + indexBoost = in.readFloat(); nowInMillis = in.readVLong(); requestCache = in.readOptionalBoolean(); - if (in.getVersion().onOrAfter(Version.V_5_6_0)) { - clusterAlias = in.readOptionalString(); - } + clusterAlias = in.readOptionalString(); if (in.getVersion().onOrAfter(Version.V_6_3_0)) { allowPartialSearchResults = in.readOptionalBoolean(); } @@ -254,16 +238,12 @@ protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException out.writeOptionalWriteable(source); out.writeStringArray(types); aliasFilter.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - out.writeFloat(indexBoost); - } + out.writeFloat(indexBoost); if (asKey == false) { out.writeVLong(nowInMillis); } out.writeOptionalBoolean(requestCache); - if (out.getVersion().onOrAfter(Version.V_5_6_0)) { - out.writeOptionalString(clusterAlias); - } + out.writeOptionalString(clusterAlias); if (out.getVersion().onOrAfter(Version.V_6_3_0)) { out.writeOptionalBoolean(allowPartialSearchResults); } diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index ca06005448c0d..84c76e85f3dd0 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -95,7 +95,7 @@ public void execute(SearchContext searchContext) throws QueryPhaseExecutionExcep suggestPhase.execute(searchContext); // TODO: fix this once we can fetch docs for suggestions searchContext.queryResult().topDocs( - new TopDocs(0, Lucene.EMPTY_SCORE_DOCS, 0), + new TopDocs(0, Lucene.EMPTY_SCORE_DOCS, Float.NaN), new DocValueFormat[0]); return; } diff --git a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java index dc110b2797710..8d40cc802fffd 100644 --- a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java @@ -120,7 +120,7 @@ Collector create(Collector in) { @Override void postProcess(QuerySearchResult result) { final int totalHitCount = hitCountSupplier.getAsInt(); - result.topDocs(new TopDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS, 0), null); + result.topDocs(new TopDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS, Float.NaN), null); } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index 67ddabc37fa30..fdbe74d8d4dd9 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -76,9 +76,7 @@ public final class SnapshotInfo implements Comparable, ToXContent, private static final String SUCCESSFUL_SHARDS = "successful_shards"; private static final String INCLUDE_GLOBAL_STATE = "include_global_state"; - private static final Version VERSION_INCOMPATIBLE_INTRODUCED = Version.V_5_2_0; private static final Version INCLUDE_GLOBAL_STATE_INTRODUCED = Version.V_6_2_0; - public static final Version VERBOSE_INTRODUCED = Version.V_5_5_0; private static final Comparator COMPARATOR = Comparator.comparing(SnapshotInfo::startTime).thenComparing(SnapshotInfo::snapshotId); @@ -275,11 +273,7 @@ public SnapshotInfo(final StreamInput in) throws IOException { indicesListBuilder.add(in.readString()); } 
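The two query-phase hunks above change the max score reported for an empty result set from 0 to Float.NaN, the conventional "no value" sentinel for float scores. A tiny illustration of why NaN is safer than 0 when per-shard maxima are merged: a NaN from a hitless shard never masquerades as a real score, while a 0 sentinel would.

    final class MaxScoreSketch {
        // Hypothetical reduction over per-shard max scores.
        static float mergeMaxScore(float[] shardMaxScores) {
            float max = Float.NaN;                   // "unknown", not "zero"
            for (float s : shardMaxScores) {
                if (Float.isNaN(max) || s > max) {   // NaN never wins a comparison
                    max = s;
                }
            }
            return max;                              // NaN iff no shard had hits
        }
    }
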
indices = Collections.unmodifiableList(indicesListBuilder); - if (in.getVersion().onOrAfter(VERBOSE_INTRODUCED)) { - state = in.readBoolean() ? SnapshotState.fromValue(in.readByte()) : null; - } else { - state = SnapshotState.fromValue(in.readByte()); - } + state = in.readBoolean() ? SnapshotState.fromValue(in.readByte()) : null; reason = in.readOptionalString(); startTime = in.readVLong(); endTime = in.readVLong(); @@ -295,11 +289,7 @@ public SnapshotInfo(final StreamInput in) throws IOException { } else { shardFailures = Collections.emptyList(); } - if (in.getVersion().before(VERSION_INCOMPATIBLE_INTRODUCED)) { - version = Version.readVersion(in); - } else { - version = in.readBoolean() ? Version.readVersion(in) : null; - } + version = in.readBoolean() ? Version.readVersion(in) : null; if (in.getVersion().onOrAfter(INCLUDE_GLOBAL_STATE_INTRODUCED)) { includeGlobalState = in.readOptionalBoolean(); } @@ -681,19 +671,11 @@ public void writeTo(final StreamOutput out) throws IOException { for (String index : indices) { out.writeString(index); } - if (out.getVersion().onOrAfter(VERBOSE_INTRODUCED)) { - if (state != null) { - out.writeBoolean(true); - out.writeByte(state.value()); - } else { - out.writeBoolean(false); - } + if (state != null) { + out.writeBoolean(true); + out.writeByte(state.value()); } else { - if (out.getVersion().before(VERSION_INCOMPATIBLE_INTRODUCED) && state == SnapshotState.INCOMPATIBLE) { - out.writeByte(SnapshotState.FAILED.value()); - } else { - out.writeByte(state.value()); - } + out.writeBoolean(false); } out.writeOptionalString(reason); out.writeVLong(startTime); @@ -704,19 +686,11 @@ public void writeTo(final StreamOutput out) throws IOException { for (SnapshotShardFailure failure : shardFailures) { failure.writeTo(out); } - if (out.getVersion().before(VERSION_INCOMPATIBLE_INTRODUCED)) { - Version versionToWrite = version; - if (versionToWrite == null) { - versionToWrite = Version.CURRENT; - } - Version.writeVersion(versionToWrite, out); + if (version != null) { + out.writeBoolean(true); + Version.writeVersion(version, out); } else { - if (version != null) { - out.writeBoolean(true); - Version.writeVersion(version, out); - } else { - out.writeBoolean(false); - } + out.writeBoolean(false); } if (out.getVersion().onOrAfter(INCLUDE_GLOBAL_STATE_INTRODUCED)) { out.writeOptionalBoolean(includeGlobalState); diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java index 84c337399d5b4..da8faaf3c33f4 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java @@ -38,12 +38,12 @@ import java.io.Closeable; import java.io.IOException; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -62,6 +62,7 @@ public class ConnectionManager implements Closeable { private final TimeValue pingSchedule; private final ConnectionProfile defaultProfile; private final Lifecycle lifecycle = new Lifecycle(); + private final AtomicBoolean closed = new AtomicBoolean(false); private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); 
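The closed flag declared just above backs two small concurrency idioms in the ConnectionManager hunks that follow: listeners are registered with addIfAbsent so a double registration is a no-op, and close() is guarded by an AtomicBoolean compareAndSet so its shutdown work runs exactly once even under concurrent calls. A compact sketch of both, with Runnable standing in for TransportConnectionListener:

    import java.util.concurrent.CopyOnWriteArrayList;
    import java.util.concurrent.atomic.AtomicBoolean;

    final class CloseOnceSketch implements AutoCloseable {
        private final CopyOnWriteArrayList<Runnable> listeners = new CopyOnWriteArrayList<>();
        private final AtomicBoolean closed = new AtomicBoolean(false);

        void addListener(Runnable listener) {
            listeners.addIfAbsent(listener);         // second registration is a no-op
        }

        @Override
        public void close() {
            if (closed.compareAndSet(false, true)) { // only the first caller wins
                listeners.forEach(Runnable::run);    // one-shot shutdown work
            }
        }
    }
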
private final DelegatingNodeConnectionListener connectionListener = new DelegatingNodeConnectionListener(); @@ -83,7 +84,7 @@ public ConnectionManager(Settings settings, Transport transport, ThreadPool thre } public void addListener(TransportConnectionListener listener) { - this.connectionListener.listeners.add(listener); + this.connectionListener.listeners.addIfAbsent(listener); } public void removeListener(TransportConnectionListener listener) { @@ -186,45 +187,50 @@ public void disconnectFromNode(DiscoveryNode node) { } } - public int connectedNodeCount() { + /** + * Returns the number of nodes this manager is connected to. + */ + public int size() { return connectedNodes.size(); } @Override public void close() { - lifecycle.moveToStopped(); - CountDownLatch latch = new CountDownLatch(1); + if (closed.compareAndSet(false, true)) { + lifecycle.moveToStopped(); + CountDownLatch latch = new CountDownLatch(1); - // TODO: Consider moving all read/write lock (in Transport and this class) to the TransportService - threadPool.generic().execute(() -> { - closeLock.writeLock().lock(); - try { - // we are holding a write lock so nobody modifies the connectedNodes / openConnections map - it's safe to first close - // all instances and then clear them maps - Iterator> iterator = connectedNodes.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry next = iterator.next(); - try { - IOUtils.closeWhileHandlingException(next.getValue()); - } finally { - iterator.remove(); + // TODO: Consider moving all read/write lock (in Transport and this class) to the TransportService + threadPool.generic().execute(() -> { + closeLock.writeLock().lock(); + try { + // we are holding a write lock so nobody modifies the connectedNodes / openConnections map - it's safe to first close + // all instances and then clear them maps + Iterator> iterator = connectedNodes.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry next = iterator.next(); + try { + IOUtils.closeWhileHandlingException(next.getValue()); + } finally { + iterator.remove(); + } } + } finally { + closeLock.writeLock().unlock(); + latch.countDown(); } - } finally { - closeLock.writeLock().unlock(); - latch.countDown(); - } - }); + }); - try { try { - latch.await(30, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - // ignore + try { + latch.await(30, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // ignore + } + } finally { + lifecycle.moveToClosed(); } - } finally { - lifecycle.moveToClosed(); } } @@ -288,7 +294,7 @@ public void onFailure(Exception e) { private static final class DelegatingNodeConnectionListener implements TransportConnectionListener { - private final List listeners = new CopyOnWriteArrayList<>(); + private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); @Override public void onNodeDisconnected(DiscoveryNode key) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 15cf7899dc03f..5621b38557814 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -35,6 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.component.AbstractComponent; +import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -80,16 +81,17 @@ final class RemoteClusterConnection extends AbstractComponent implements TransportConnectionListener, Closeable { private final TransportService transportService; + private final ConnectionManager connectionManager; private final ConnectionProfile remoteProfile; private final ConnectedNodes connectedNodes; private final String clusterAlias; private final int maxNumRemoteConnections; private final Predicate nodePredicate; + private final ThreadPool threadPool; private volatile List> seedNodes; private volatile boolean skipUnavailable; private final ConnectHandler connectHandler; private SetOnce remoteClusterName = new SetOnce<>(); - private final ClusterName localClusterName; /** * Creates a new {@link RemoteClusterConnection} @@ -97,13 +99,14 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo * @param clusterAlias the configured alias of the cluster to connect to * @param seedNodes a list of seed nodes to discover eligible nodes from * @param transportService the local nodes transport service + * @param connectionManager the connection manager to use for this remote connection * @param maxNumRemoteConnections the maximum number of connections to the remote cluster * @param nodePredicate a predicate to filter eligible remote nodes to connect to */ RemoteClusterConnection(Settings settings, String clusterAlias, List> seedNodes, - TransportService transportService, int maxNumRemoteConnections, Predicate nodePredicate) { + TransportService transportService, ConnectionManager connectionManager, int maxNumRemoteConnections, + Predicate nodePredicate) { super(settings); - this.localClusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); this.transportService = transportService; this.maxNumRemoteConnections = maxNumRemoteConnections; this.nodePredicate = nodePredicate; @@ -122,7 +125,11 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo this.skipUnavailable = RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE .getConcreteSettingForNamespace(clusterAlias).get(settings); this.connectHandler = new ConnectHandler(); - transportService.addConnectionListener(this); + this.threadPool = transportService.threadPool; + this.connectionManager = connectionManager; + connectionManager.addListener(this); + // we register the transport service here as a listener to make sure we notify handlers on disconnect etc. 
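The constructor changes above give each remote-cluster connection its own ConnectionManager and register two listeners on it: the connection itself and the owning transport service, so disconnect handling fires for both. A rough sketch of that wiring with simplified stand-in types rather than the real transport classes:

    import java.util.ArrayList;
    import java.util.List;

    final class RemoteConnectionWiringSketch {
        interface ConnectionListener { void onNodeDisconnected(String nodeId); }

        static final class ManagerSketch {
            private final List<ConnectionListener> listeners = new ArrayList<>();
            void addListener(ConnectionListener l) { listeners.add(l); }
            void notifyDisconnected(String nodeId) {
                listeners.forEach(l -> l.onNodeDisconnected(nodeId));
            }
        }

        static ManagerSketch wire(ConnectionListener remoteConnection, ConnectionListener transportService) {
            ManagerSketch manager = new ManagerSketch();
            manager.addListener(remoteConnection);  // the remote connection's own handler
            manager.addListener(transportService);  // keeps response handlers notified on disconnect
            return manager;
        }
    }
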
+ connectionManager.addListener(transportService); } /** @@ -183,8 +190,9 @@ public void ensureConnected(ActionListener voidActionListener) { private void fetchShardsInternal(ClusterSearchShardsRequest searchShardsRequest, final ActionListener listener) { - final DiscoveryNode node = connectedNodes.getAny(); - transportService.sendRequest(node, ClusterSearchShardsAction.NAME, searchShardsRequest, + final DiscoveryNode node = getAnyConnectedNode(); + Transport.Connection connection = connectionManager.getConnection(node); + transportService.sendRequest(connection, ClusterSearchShardsAction.NAME, searchShardsRequest, TransportRequestOptions.EMPTY, new TransportResponseHandler() { @Override @@ -219,12 +227,16 @@ void collectNodes(ActionListener> listener) { request.clear(); request.nodes(true); request.local(true); // run this on the node that gets the request it's as good as any other - final DiscoveryNode node = connectedNodes.getAny(); - transportService.sendRequest(node, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, + final DiscoveryNode node = getAnyConnectedNode(); + Transport.Connection connection = connectionManager.getConnection(node); + transportService.sendRequest(connection, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, new TransportResponseHandler() { + @Override - public ClusterStateResponse newInstance() { - return new ClusterStateResponse(); + public ClusterStateResponse read(StreamInput in) throws IOException { + ClusterStateResponse response = new ClusterStateResponse(); + response.readFrom(in); + return response; } @Override @@ -261,11 +273,11 @@ public String executor() { * If such node is not connected, the returned connection will be a proxy connection that redirects to it. */ Transport.Connection getConnection(DiscoveryNode remoteClusterNode) { - if (transportService.nodeConnected(remoteClusterNode)) { - return transportService.getConnection(remoteClusterNode); + if (connectionManager.nodeConnected(remoteClusterNode)) { + return connectionManager.getConnection(remoteClusterNode); } - DiscoveryNode discoveryNode = connectedNodes.getAny(); - Transport.Connection connection = transportService.getConnection(discoveryNode); + DiscoveryNode discoveryNode = getAnyConnectedNode(); + Transport.Connection connection = connectionManager.getConnection(discoveryNode); return new ProxyConnection(connection, remoteClusterNode); } @@ -317,33 +329,18 @@ public Version getVersion() { } Transport.Connection getConnection() { - return transportService.getConnection(getAnyConnectedNode()); + return connectionManager.getConnection(getAnyConnectedNode()); } @Override public void close() throws IOException { - connectHandler.close(); + IOUtils.close(connectHandler, connectionManager); } public boolean isClosed() { return connectHandler.isClosed(); } - private ConnectionProfile getRemoteProfile(ClusterName name) { - // we can only compare the cluster name to make a decision if we should use a remote profile - // we can't use a cluster UUID here since we could be connecting to that remote cluster before - // the remote node has joined its cluster and have a cluster UUID. The fact that we just lose a - // rather smallish optimization on the connection layer under certain situations where remote clusters - // have the same name as the local one is minor here. - // the alternative here is to complicate the remote infrastructure to also wait until we formed a cluster, - // gained a cluster UUID and then start connecting etc. 
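The collectNodes hunk above replaces the response handler's newInstance() with a read(StreamInput) override that both allocates and deserializes the response in one step. A sketch of that shape, with simplified stand-in types rather than the real transport interfaces:

    import java.io.DataInputStream;
    import java.io.IOException;

    interface ResponseReaderSketch<T> {
        T read(DataInputStream in) throws IOException;  // stand-in for read(StreamInput)
    }

    final class ClusterNameReaderSketch implements ResponseReaderSketch<String> {
        @Override
        public String read(DataInputStream in) throws IOException {
            // Real code: ClusterStateResponse response = new ClusterStateResponse();
            // response.readFrom(in); return response;
            return in.readUTF();
        }
    }
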
we rather use this simplification in order to maintain simplicity - if (this.localClusterName.equals(name)) { - return null; - } else { - return remoteProfile; - } - } - /** * The connect handler manages node discovery and the actual connect to the remote cluster. * There is at most one connect job running at any time. If such a connect job is triggered @@ -387,7 +384,7 @@ private void connect(ActionListener connectListener, boolean forceRun) { final boolean runConnect; final Collection> toNotify; final ActionListener listener = connectListener == null ? null : - ContextPreservingActionListener.wrapPreservingContext(connectListener, transportService.getThreadPool().getThreadContext()); + ContextPreservingActionListener.wrapPreservingContext(connectListener, threadPool.getThreadContext()); synchronized (queue) { if (listener != null && queue.offer(listener) == false) { listener.onFailure(new RejectedExecutionException("connect queue is full")); @@ -415,7 +412,6 @@ private void connect(ActionListener connectListener, boolean forceRun) { } private void forkConnect(final Collection> toNotify) { - ThreadPool threadPool = transportService.getThreadPool(); ExecutorService executor = threadPool.executor(ThreadPool.Names.MANAGEMENT); executor.submit(new AbstractRunnable() { @Override @@ -452,13 +448,13 @@ protected void doRun() { maybeConnect(); } }); - collectRemoteNodes(seedNodes.iterator(), transportService, listener); + collectRemoteNodes(seedNodes.iterator(), transportService, connectionManager, listener); } }); } private void collectRemoteNodes(Iterator> seedNodes, - final TransportService transportService, ActionListener listener) { + final TransportService transportService, final ConnectionManager manager, ActionListener listener) { if (Thread.currentThread().isInterrupted()) { listener.onFailure(new InterruptedException("remote connect thread got interrupted")); } @@ -467,7 +463,7 @@ private void collectRemoteNodes(Iterator> seedNodes, cancellableThreads.executeIO(() -> { final DiscoveryNode seedNode = seedNodes.next().get(); final TransportService.HandshakeResponse handshakeResponse; - Transport.Connection connection = transportService.openConnection(seedNode, + Transport.Connection connection = manager.openConnection(seedNode, ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, null, null)); boolean success = false; try { @@ -482,7 +478,7 @@ private void collectRemoteNodes(Iterator> seedNodes, final DiscoveryNode handshakeNode = handshakeResponse.getDiscoveryNode(); if (nodePredicate.test(handshakeNode) && connectedNodes.size() < maxNumRemoteConnections) { - transportService.connectToNode(handshakeNode, getRemoteProfile(handshakeResponse.getClusterName())); + manager.connectToNode(handshakeNode, remoteProfile, transportService.connectionValidator(handshakeNode)); if (remoteClusterName.get() == null) { assert handshakeResponse.getClusterName().value() != null; remoteClusterName.set(handshakeResponse.getClusterName()); @@ -524,7 +520,7 @@ private void collectRemoteNodes(Iterator> seedNodes, // ISE if we fail the handshake with an version incompatible node if (seedNodes.hasNext()) { logger.debug(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex); - collectRemoteNodes(seedNodes, transportService, listener); + collectRemoteNodes(seedNodes, transportService, manager, listener); } else { listener.onFailure(ex); } @@ -552,7 +548,6 @@ final boolean isClosed() { /* This class handles the _state response from the remote 
cluster when sniffing nodes to connect to */ private class SniffClusterStateResponseHandler implements TransportResponseHandler { - private final TransportService transportService; private final Transport.Connection connection; private final ActionListener listener; private final Iterator> seedNodes; @@ -561,7 +556,6 @@ private class SniffClusterStateResponseHandler implements TransportResponseHandl SniffClusterStateResponseHandler(TransportService transportService, Transport.Connection connection, ActionListener listener, Iterator> seedNodes, CancellableThreads cancellableThreads) { - this.transportService = transportService; this.connection = connection; this.listener = listener; this.seedNodes = seedNodes; @@ -592,8 +586,8 @@ public void handleResponse(ClusterStateResponse response) { for (DiscoveryNode node : nodesIter) { if (nodePredicate.test(node) && connectedNodes.size() < maxNumRemoteConnections) { try { - transportService.connectToNode(node, getRemoteProfile(remoteClusterName.get())); // noop if node is - // connected + connectionManager.connectToNode(node, remoteProfile, + transportService.connectionValidator(node)); // noop if node is connected connectedNodes.add(node); } catch (ConnectTransportException | IllegalStateException ex) { // ISE if we fail the handshake with an version incompatible node @@ -609,7 +603,7 @@ public void handleResponse(ClusterStateResponse response) { listener.onFailure(ex); // we got canceled - fail the listener and step out } catch (Exception ex) { logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex); - collectRemoteNodes(seedNodes, transportService, listener); + collectRemoteNodes(seedNodes, transportService, connectionManager, listener); } } @@ -620,7 +614,7 @@ public void handleException(TransportException exp) { IOUtils.closeWhileHandlingException(connection); } finally { // once the connection is closed lets try the next node - collectRemoteNodes(seedNodes, transportService, listener); + collectRemoteNodes(seedNodes, transportService, connectionManager, listener); } } @@ -715,4 +709,8 @@ private synchronized void ensureIteratorAvailable() { } } } + + ConnectionManager getConnectionManager() { + return connectionManager; + } } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 956a0d94179e0..34f13b672874f 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.transport; +import java.util.Collection; import java.util.function.Supplier; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -139,7 +140,8 @@ private synchronized void updateRemoteClusters(Map getConnections() { + return remoteClusters.values(); + } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index fb14ae96dbf20..e37ea81211ad7 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -28,6 +28,7 @@ import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.CheckedBiConsumer; 
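In the TransportService hunks below, sendRequest gains a guard: a NodeNotConnectedException thrown synchronously by the sender is routed into the response handler, since callers typically install their error handling there rather than around the call. A minimal sketch of that pattern with stand-in types:

    final class SendGuardSketch {
        interface Handler {
            void handleException(Exception e);
        }

        // Stand-in for the asyncSender.sendRequest(...) call.
        static void sendRequest(Runnable send, Handler handler) {
            try {
                send.run();
            } catch (IllegalStateException ex) { // stand-in for NodeNotConnectedException
                handler.handleException(ex);     // the caller might not handle the throw
            }
        }
    }
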
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -56,6 +57,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.UnknownHostException; import java.util.Arrays; import java.util.Collections; @@ -268,8 +270,9 @@ protected void doStart() { @Override protected void doStop() { try { - connectionManager.close(); - transport.stop(); + IOUtils.close(connectionManager, remoteClusterService, transport::stop); + } catch (IOException e) { + throw new UncheckedIOException(e); } finally { // in case the transport is not connected to our local node (thus cleaned on node disconnect) // make sure to clean any leftover on going handles @@ -306,7 +309,7 @@ public void doRun() { @Override protected void doClose() throws IOException { - IOUtils.close(remoteClusterService, transport); + transport.close(); } /** @@ -364,14 +367,18 @@ public void connectToNode(final DiscoveryNode node, ConnectionProfile connection if (isLocalNode(node)) { return; } + connectionManager.connectToNode(node, connectionProfile, connectionValidator(node)); + } - connectionManager.connectToNode(node, connectionProfile, (newConnection, actualProfile) -> { + public CheckedBiConsumer connectionValidator(DiscoveryNode node) { + return (newConnection, actualProfile) -> { // We don't validate cluster names to allow for CCS connections. final DiscoveryNode remote = handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true).discoveryNode; if (validateConnections && node.equals(remote) == false) { throw new ConnectTransportException(node, "handshake failed. unexpected remote node " + remote); } - }); + }; + } /** @@ -562,8 +569,12 @@ public final void sendRequest(final Transport.Conn final TransportRequest request, final TransportRequestOptions options, TransportResponseHandler handler) { - - asyncSender.sendRequest(connection, action, request, options, handler); + try { + asyncSender.sendRequest(connection, action, request, options, handler); + } catch (NodeNotConnectedException ex) { + // the caller might not handle this so we invoke the handler + handler.handleException(ex); + } } /** diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 1f62eb706a84b..5c8c25cbfddfe 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -41,8 +41,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.breaker.CircuitBreakingException; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -104,7 +102,6 @@ import java.nio.file.Path; import java.nio.file.attribute.BasicFileAttributes; import java.util.Arrays; -import java.util.Base64; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -116,7 +113,6 @@ import static java.util.Collections.emptySet; import static java.util.Collections.singleton; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.instanceOf; public class ExceptionSerializationTests extends 
ESTestCase { @@ -872,89 +868,12 @@ public void testElasticsearchRemoteException() throws IOException { public void testShardLockObtainFailedException() throws IOException { ShardId shardId = new ShardId("foo", "_na_", 1); ShardLockObtainFailedException orig = new ShardLockObtainFailedException(shardId, "boom"); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); - if (version.before(Version.V_5_0_2)) { - version = Version.V_5_0_2; - } + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); ShardLockObtainFailedException ex = serialize(orig, version); assertEquals(orig.getMessage(), ex.getMessage()); assertEquals(orig.getShardId(), ex.getShardId()); } - public void testBWCShardLockObtainFailedException() throws IOException { - ShardId shardId = new ShardId("foo", "_na_", 1); - ShardLockObtainFailedException orig = new ShardLockObtainFailedException(shardId, "boom"); - Exception ex = serialize((Exception)orig, randomFrom(Version.V_5_0_0, Version.V_5_0_1)); - assertThat(ex, instanceOf(NotSerializableExceptionWrapper.class)); - assertEquals("shard_lock_obtain_failed_exception: [foo][1]: boom", ex.getMessage()); - } - - public void testBWCHeadersAndMetadata() throws IOException { - //this is a request serialized with headers only, no metadata as they were added in 5.3.0 - BytesReference decoded = new BytesArray(Base64.getDecoder().decode - ("AQ10ZXN0ICBtZXNzYWdlACYtb3JnLmVsYXN0aWNzZWFyY2guRXhjZXB0aW9uU2VyaWFsaXphdGlvblRlc3RzASBFeGNlcHRpb25TZXJpYWxpemF0aW9uVG" + - "VzdHMuamF2YQR0ZXN03wYkc3VuLnJlZmxlY3QuTmF0aXZlTWV0aG9kQWNjZXNzb3JJbXBsAR1OYXRpdmVNZXRob2RBY2Nlc3NvckltcGwuamF2Y" + - "QdpbnZva2Uw/v///w8kc3VuLnJlZmxlY3QuTmF0aXZlTWV0aG9kQWNjZXNzb3JJbXBsAR1OYXRpdmVNZXRob2RBY2Nlc3NvckltcGwuamF2YQZp" + - "bnZva2U+KHN1bi5yZWZsZWN0LkRlbGVnYXRpbmdNZXRob2RBY2Nlc3NvckltcGwBIURlbGVnYXRpbmdNZXRob2RBY2Nlc3NvckltcGwuamF2YQZ" + - "pbnZva2UrGGphdmEubGFuZy5yZWZsZWN0Lk1ldGhvZAELTWV0aG9kLmphdmEGaW52b2tl8QMzY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdG" + - "VzdGluZy5SYW5kb21pemVkUnVubmVyARVSYW5kb21pemVkUnVubmVyLmphdmEGaW52b2tlsQ01Y29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkd" + - "GVzdGluZy5SYW5kb21pemVkUnVubmVyJDgBFVJhbmRvbWl6ZWRSdW5uZXIuamF2YQhldmFsdWF0ZYsHNWNvbS5jYXJyb3RzZWFyY2gucmFuZG9t" + - "aXplZHRlc3RpbmcuUmFuZG9taXplZFJ1bm5lciQ5ARVSYW5kb21pemVkUnVubmVyLmphdmEIZXZhbHVhdGWvBzZjb20uY2Fycm90c2VhcmNoLnJ" + - "hbmRvbWl6ZWR0ZXN0aW5nLlJhbmRvbWl6ZWRSdW5uZXIkMTABFVJhbmRvbWl6ZWRSdW5uZXIuamF2YQhldmFsdWF0Zb0HOWNvbS5jYXJyb3RzZW" + - "FyY2gucmFuZG9taXplZHRlc3RpbmcucnVsZXMuU3RhdGVtZW50QWRhcHRlcgEVU3RhdGVtZW50QWRhcHRlci5qYXZhCGV2YWx1YXRlJDVvcmcuY" + - "XBhY2hlLmx1Y2VuZS51dGlsLlRlc3RSdWxlU2V0dXBUZWFyZG93bkNoYWluZWQkMQEhVGVzdFJ1bGVTZXR1cFRlYXJkb3duQ2hhaW5lZC5qYXZh" + - "CGV2YWx1YXRlMTBvcmcuYXBhY2hlLmx1Y2VuZS51dGlsLkFic3RyYWN0QmVmb3JlQWZ0ZXJSdWxlJDEBHEFic3RyYWN0QmVmb3JlQWZ0ZXJSdWx" + - "lLmphdmEIZXZhbHVhdGUtMm9yZy5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVUaHJlYWRBbmRUZXN0TmFtZSQxAR5UZXN0UnVsZVRocmVhZE" + - "FuZFRlc3ROYW1lLmphdmEIZXZhbHVhdGUwN29yZy5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVJZ25vcmVBZnRlck1heEZhaWx1cmVzJDEBI" + - "1Rlc3RSdWxlSWdub3JlQWZ0ZXJNYXhGYWlsdXJlcy5qYXZhCGV2YWx1YXRlQCxvcmcuYXBhY2hlLmx1Y2VuZS51dGlsLlRlc3RSdWxlTWFya0Zh" + - "aWx1cmUkMQEYVGVzdFJ1bGVNYXJrRmFpbHVyZS5qYXZhCGV2YWx1YXRlLzljb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGV" + - "zLlN0YXRlbWVudEFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSREY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdG" + - 
"luZy5UaHJlYWRMZWFrQ29udHJvbCRTdGF0ZW1lbnRSdW5uZXIBFlRocmVhZExlYWtDb250cm9sLmphdmEDcnVu7wI0Y29tLmNhcnJvdHNlYXJja" + - "C5yYW5kb21pemVkdGVzdGluZy5UaHJlYWRMZWFrQ29udHJvbAEWVGhyZWFkTGVha0NvbnRyb2wuamF2YRJmb3JrVGltZW91dGluZ1Rhc2urBjZj" + - "b20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLlRocmVhZExlYWtDb250cm9sJDMBFlRocmVhZExlYWtDb250cm9sLmphdmEIZXZhbHV" + - "hdGXOAzNjb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLlJhbmRvbWl6ZWRSdW5uZXIBFVJhbmRvbWl6ZWRSdW5uZXIuamF2YQ1ydW" + - "5TaW5nbGVUZXN0lAc1Y29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdGluZy5SYW5kb21pemVkUnVubmVyJDUBFVJhbmRvbWl6ZWRSdW5uZ" + - "XIuamF2YQhldmFsdWF0ZaIGNWNvbS5jYXJyb3RzZWFyY2gucmFuZG9taXplZHRlc3RpbmcuUmFuZG9taXplZFJ1bm5lciQ2ARVSYW5kb21pemVk" + - "UnVubmVyLmphdmEIZXZhbHVhdGXUBjVjb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLlJhbmRvbWl6ZWRSdW5uZXIkNwEVUmFuZG9" + - "taXplZFJ1bm5lci5qYXZhCGV2YWx1YXRl3wYwb3JnLmFwYWNoZS5sdWNlbmUudXRpbC5BYnN0cmFjdEJlZm9yZUFmdGVyUnVsZSQxARxBYnN0cm" + - "FjdEJlZm9yZUFmdGVyUnVsZS5qYXZhCGV2YWx1YXRlLTljb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLlN0YXRlbWVud" + - "EFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSQvb3JnLmFwYWNoZS5sdWNlbmUudXRpbC5UZXN0UnVsZVN0b3JlQ2xhc3NO" + - "YW1lJDEBG1Rlc3RSdWxlU3RvcmVDbGFzc05hbWUuamF2YQhldmFsdWF0ZSlOY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdGluZy5ydWx" + - "lcy5Ob1NoYWRvd2luZ09yT3ZlcnJpZGVzT25NZXRob2RzUnVsZSQxAShOb1NoYWRvd2luZ09yT3ZlcnJpZGVzT25NZXRob2RzUnVsZS5qYXZhCG" + - "V2YWx1YXRlKE5jb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLk5vU2hhZG93aW5nT3JPdmVycmlkZXNPbk1ldGhvZHNSd" + - "WxlJDEBKE5vU2hhZG93aW5nT3JPdmVycmlkZXNPbk1ldGhvZHNSdWxlLmphdmEIZXZhbHVhdGUoOWNvbS5jYXJyb3RzZWFyY2gucmFuZG9taXpl" + - "ZHRlc3RpbmcucnVsZXMuU3RhdGVtZW50QWRhcHRlcgEVU3RhdGVtZW50QWRhcHRlci5qYXZhCGV2YWx1YXRlJDljb20uY2Fycm90c2VhcmNoLnJ" + - "hbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLlN0YXRlbWVudEFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSQ5Y29tLmNhcnJvdH" + - "NlYXJjaC5yYW5kb21pemVkdGVzdGluZy5ydWxlcy5TdGF0ZW1lbnRBZGFwdGVyARVTdGF0ZW1lbnRBZGFwdGVyLmphdmEIZXZhbHVhdGUkM29yZ" + - "y5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVBc3NlcnRpb25zUmVxdWlyZWQkMQEfVGVzdFJ1bGVBc3NlcnRpb25zUmVxdWlyZWQuamF2YQhl" + - "dmFsdWF0ZTUsb3JnLmFwYWNoZS5sdWNlbmUudXRpbC5UZXN0UnVsZU1hcmtGYWlsdXJlJDEBGFRlc3RSdWxlTWFya0ZhaWx1cmUuamF2YQhldmF" + - "sdWF0ZS83b3JnLmFwYWNoZS5sdWNlbmUudXRpbC5UZXN0UnVsZUlnbm9yZUFmdGVyTWF4RmFpbHVyZXMkMQEjVGVzdFJ1bGVJZ25vcmVBZnRlck" + - "1heEZhaWx1cmVzLmphdmEIZXZhbHVhdGVAMW9yZy5hcGFjaGUubHVjZW5lLnV0aWwuVGVzdFJ1bGVJZ25vcmVUZXN0U3VpdGVzJDEBHVRlc3RSd" + - "WxlSWdub3JlVGVzdFN1aXRlcy5qYXZhCGV2YWx1YXRlNjljb20uY2Fycm90c2VhcmNoLnJhbmRvbWl6ZWR0ZXN0aW5nLnJ1bGVzLlN0YXRlbWVu" + - "dEFkYXB0ZXIBFVN0YXRlbWVudEFkYXB0ZXIuamF2YQhldmFsdWF0ZSREY29tLmNhcnJvdHNlYXJjaC5yYW5kb21pemVkdGVzdGluZy5UaHJlYWR" + - "MZWFrQ29udHJvbCRTdGF0ZW1lbnRSdW5uZXIBFlRocmVhZExlYWtDb250cm9sLmphdmEDcnVu7wIQamF2YS5sYW5nLlRocmVhZAELVGhyZWFkLm" + - "phdmEDcnVu6QUABAdoZWFkZXIyAQZ2YWx1ZTIKZXMuaGVhZGVyMwEGdmFsdWUzB2hlYWRlcjEBBnZhbHVlMQplcy5oZWFkZXI0AQZ2YWx1ZTQAA" + - "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + - "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + - "AAAAA")); - - try (StreamInput in = decoded.streamInput()) { - //randomize the version across released and unreleased ones - Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - in.setVersion(version); - ElasticsearchException exception = new ElasticsearchException(in); - 
assertEquals("test message", exception.getMessage()); - //the headers received as part of a single set get split based on their prefix - assertEquals(2, exception.getHeaderKeys().size()); - assertEquals("value1", exception.getHeader("header1").get(0)); - assertEquals("value2", exception.getHeader("header2").get(0)); - assertEquals(2, exception.getMetadataKeys().size()); - assertEquals("value3", exception.getMetadata("es.header3").get(0)); - assertEquals("value4", exception.getMetadata("es.header4").get(0)); - } - } - private static class UnknownException extends Exception { UnknownException(final String message, final Exception cause) { super(message, cause); diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index 74303bfb6d851..4c7dc9eb094b7 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -36,8 +36,8 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.Version.V_5_3_0; -import static org.elasticsearch.Version.V_6_0_0_beta1; +import static org.elasticsearch.Version.V_6_3_0; +import static org.elasticsearch.Version.V_7_0_0_alpha1; import static org.elasticsearch.test.VersionUtils.allVersions; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; @@ -50,30 +50,30 @@ public class VersionTests extends ESTestCase { public void testVersionComparison() throws Exception { - assertThat(V_5_3_0.before(V_6_0_0_beta1), is(true)); - assertThat(V_5_3_0.before(V_5_3_0), is(false)); - assertThat(V_6_0_0_beta1.before(V_5_3_0), is(false)); + assertThat(V_6_3_0.before(V_7_0_0_alpha1), is(true)); + assertThat(V_6_3_0.before(V_6_3_0), is(false)); + assertThat(V_7_0_0_alpha1.before(V_6_3_0), is(false)); - assertThat(V_5_3_0.onOrBefore(V_6_0_0_beta1), is(true)); - assertThat(V_5_3_0.onOrBefore(V_5_3_0), is(true)); - assertThat(V_6_0_0_beta1.onOrBefore(V_5_3_0), is(false)); + assertThat(V_6_3_0.onOrBefore(V_7_0_0_alpha1), is(true)); + assertThat(V_6_3_0.onOrBefore(V_6_3_0), is(true)); + assertThat(V_7_0_0_alpha1.onOrBefore(V_6_3_0), is(false)); - assertThat(V_5_3_0.after(V_6_0_0_beta1), is(false)); - assertThat(V_5_3_0.after(V_5_3_0), is(false)); - assertThat(V_6_0_0_beta1.after(V_5_3_0), is(true)); + assertThat(V_6_3_0.after(V_7_0_0_alpha1), is(false)); + assertThat(V_6_3_0.after(V_6_3_0), is(false)); + assertThat(V_7_0_0_alpha1.after(V_6_3_0), is(true)); - assertThat(V_5_3_0.onOrAfter(V_6_0_0_beta1), is(false)); - assertThat(V_5_3_0.onOrAfter(V_5_3_0), is(true)); - assertThat(V_6_0_0_beta1.onOrAfter(V_5_3_0), is(true)); + assertThat(V_6_3_0.onOrAfter(V_7_0_0_alpha1), is(false)); + assertThat(V_6_3_0.onOrAfter(V_6_3_0), is(true)); + assertThat(V_7_0_0_alpha1.onOrAfter(V_6_3_0), is(true)); assertTrue(Version.fromString("5.0.0-alpha2").onOrAfter(Version.fromString("5.0.0-alpha1"))); assertTrue(Version.fromString("5.0.0").onOrAfter(Version.fromString("5.0.0-beta2"))); assertTrue(Version.fromString("5.0.0-rc1").onOrAfter(Version.fromString("5.0.0-beta24"))); assertTrue(Version.fromString("5.0.0-alpha24").before(Version.fromString("5.0.0-beta0"))); - assertThat(V_5_3_0, is(lessThan(V_6_0_0_beta1))); - assertThat(V_5_3_0.compareTo(V_5_3_0), is(0)); - assertThat(V_6_0_0_beta1, is(greaterThan(V_5_3_0))); + assertThat(V_6_3_0, is(lessThan(V_7_0_0_alpha1))); + assertThat(V_6_3_0.compareTo(V_6_3_0), is(0)); + assertThat(V_7_0_0_alpha1, is(greaterThan(V_6_3_0))); } public 
void testMin() { @@ -101,12 +101,12 @@ public void testMax() { } public void testMinimumIndexCompatibilityVersion() { - assertEquals(Version.V_5_0_0, Version.V_6_0_0_beta1.minimumIndexCompatibilityVersion()); - assertEquals(Version.fromId(2000099), Version.V_5_0_0.minimumIndexCompatibilityVersion()); + assertEquals(Version.fromId(5000099), Version.V_6_0_0_beta1.minimumIndexCompatibilityVersion()); + assertEquals(Version.fromId(2000099), Version.fromId(5000099).minimumIndexCompatibilityVersion()); assertEquals(Version.fromId(2000099), - Version.V_5_1_1.minimumIndexCompatibilityVersion()); + Version.fromId(5010000).minimumIndexCompatibilityVersion()); assertEquals(Version.fromId(2000099), - Version.V_5_0_0_alpha1.minimumIndexCompatibilityVersion()); + Version.fromId(5000001).minimumIndexCompatibilityVersion()); } public void testVersionConstantPresent() { @@ -160,31 +160,38 @@ public void testVersionNoPresentInSettings() { public void testIndexCreatedVersion() { // an actual index has a IndexMetaData.SETTING_INDEX_UUID - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_2, - Version.V_5_2_0, Version.V_6_0_0_beta1); + final Version version = Version.V_6_0_0_beta1; assertEquals(version, Version.indexCreated(Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "foo").put(IndexMetaData.SETTING_VERSION_CREATED, version).build())); } public void testMinCompatVersion() { - Version prerelease = VersionUtils.getFirstVersion(); - assertThat(prerelease.minimumCompatibilityVersion(), equalTo(prerelease)); Version major = Version.fromString("2.0.0"); assertThat(Version.fromString("2.0.0").minimumCompatibilityVersion(), equalTo(major)); assertThat(Version.fromString("2.2.0").minimumCompatibilityVersion(), equalTo(major)); assertThat(Version.fromString("2.3.0").minimumCompatibilityVersion(), equalTo(major)); - // from 6.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is + + Version major5x = Version.fromString("5.0.0"); + assertThat(Version.fromString("5.0.0").minimumCompatibilityVersion(), equalTo(major5x)); + assertThat(Version.fromString("5.2.0").minimumCompatibilityVersion(), equalTo(major5x)); + assertThat(Version.fromString("5.3.0").minimumCompatibilityVersion(), equalTo(major5x)); + + Version major56x = Version.fromString("5.6.0"); + assertThat(Version.V_6_5_0.minimumCompatibilityVersion(), equalTo(major56x)); + assertThat(Version.V_6_3_1.minimumCompatibilityVersion(), equalTo(major56x)); + + // from 7.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 
6.x is
         // released since we need to bump the supported minor in Version#minimumCompatibilityVersion()
-        Version lastVersion = Version.V_5_6_0; // TODO: remove this once min compat version is a constant instead of method
-        assertEquals(lastVersion.major, Version.V_6_0_0_beta1.minimumCompatibilityVersion().major);
+        Version lastVersion = Version.V_6_5_0; // TODO: remove this once min compat version is a constant instead of method
+        assertEquals(lastVersion.major, Version.V_7_0_0_alpha1.minimumCompatibilityVersion().major);
         assertEquals("did you miss to bump the minor in Version#minimumCompatibilityVersion()",
-            lastVersion.minor, Version.V_6_0_0_beta1.minimumCompatibilityVersion().minor);
-        assertEquals(0, Version.V_6_0_0_beta1.minimumCompatibilityVersion().revision);
+            lastVersion.minor, Version.V_7_0_0_alpha1.minimumCompatibilityVersion().minor);
+        assertEquals(0, Version.V_7_0_0_alpha1.minimumCompatibilityVersion().revision);
     }

     public void testToString() {
         // with 2.0.beta we lowercase
         assertEquals("2.0.0-beta1", Version.fromString("2.0.0-beta1").toString());
-        assertEquals("5.0.0-alpha1", Version.V_5_0_0_alpha1.toString());
+        assertEquals("5.0.0-alpha1", Version.fromId(5000001).toString());
         assertEquals("2.3.0", Version.fromString("2.3.0").toString());
         assertEquals("0.90.0.Beta1", Version.fromString("0.90.0.Beta1").toString());
         assertEquals("1.0.0.Beta1", Version.fromString("1.0.0.Beta1").toString());
@@ -334,11 +341,11 @@ public static void assertUnknownVersion(Version version) {

     public void testIsCompatible() {
         assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()));
-        assertTrue(isCompatible(Version.V_5_6_0, Version.V_6_0_0_alpha2));
-        assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_0_0_alpha2));
-        assertFalse(isCompatible(Version.fromId(2000099), Version.V_5_0_0));
-        assertFalse(isCompatible(Version.fromString("6.0.0"), Version.fromString("7.0.0")));
-        assertFalse(isCompatible(Version.fromString("6.0.0-alpha1"), Version.fromString("7.0.0")));
+        assertTrue(isCompatible(Version.V_6_5_0, Version.V_7_0_0_alpha1));
+        assertFalse(isCompatible(Version.fromId(2000099), Version.V_7_0_0_alpha1));
+        assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_5_0));
+        assertFalse(isCompatible(Version.fromString("7.0.0"), Version.fromString("8.0.0")));
+        assertFalse(isCompatible(Version.fromString("7.0.0-alpha1"), Version.fromString("8.0.0")));

         final Version currentMajorVersion = Version.fromId(Version.CURRENT.major * 1000000 + 99);
         final Version currentOrNextMajorVersion;
@@ -373,8 +380,8 @@ public void testIsCompatible() {
             isCompatible(VersionUtils.getPreviousMinorVersion(), currentOrNextMajorVersion),
             equalTo(isCompatible));

-        assertFalse(isCompatible(Version.V_5_0_0, Version.fromString("6.0.0")));
-        assertFalse(isCompatible(Version.V_5_0_0, Version.fromString("7.0.0")));
+        assertFalse(isCompatible(Version.fromId(5000099), Version.fromString("6.0.0")));
+        assertFalse(isCompatible(Version.fromId(5000099), Version.fromString("7.0.0")));

         Version a = randomVersion(random());
         Version b = randomVersion(random());
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java
index 7bf43b828c05a..3384efcf836c6 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java
@@ -49,7 
+49,6 @@ import java.util.List; import java.util.Map; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomLongBetween; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java index 232259948fb2f..5f5fe54321bbb 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java @@ -54,7 +54,7 @@ public void testSerialization() throws Exception { request.routing(routings); } - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); try (BytesStreamOutput out = new BytesStreamOutput()) { out.setVersion(version); request.writeTo(out); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java index 90eb7cdcfd46a..f685be02141ad 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java @@ -77,7 +77,7 @@ public void testSerialization() throws Exception { List entries = new ArrayList<>(); entries.addAll(searchModule.getNamedWriteables()); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); try(BytesStreamOutput out = new BytesStreamOutput()) { out.setVersion(version); clusterSearchShardsResponse.writeTo(out); @@ -93,11 +93,7 @@ public void testSerialization() throws Exception { assertEquals(clusterSearchShardsGroup.getShardId(), deserializedGroup.getShardId()); assertArrayEquals(clusterSearchShardsGroup.getShards(), deserializedGroup.getShards()); } - if (version.onOrAfter(Version.V_5_1_1)) { - assertEquals(clusterSearchShardsResponse.getIndicesAndFilters(), deserialized.getIndicesAndFilters()); - } else { - assertNull(deserialized.getIndicesAndFilters()); - } + assertEquals(clusterSearchShardsResponse.getIndicesAndFilters(), deserialized.getIndicesAndFilters()); } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java index 0cb0063727fe7..c0685d5d17d29 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java @@ -19,10 +19,7 @@ package org.elasticsearch.action.admin.indices.create; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentParser; import 
org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.AbstractStreamableXContentTestCase; @@ -67,25 +64,6 @@ protected CreateIndexResponse doParseInstance(XContentParser parser) { return CreateIndexResponse.fromXContent(parser); } - public void testSerializationWithOldVersion() throws IOException { - Version oldVersion = Version.V_5_4_0; - CreateIndexResponse response = new CreateIndexResponse(true, true, "foo"); - - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(oldVersion); - response.writeTo(output); - - try (StreamInput in = output.bytes().streamInput()) { - in.setVersion(oldVersion); - CreateIndexResponse serialized = new CreateIndexResponse(); - serialized.readFrom(in); - assertEquals(response.isShardsAcknowledged(), serialized.isShardsAcknowledged()); - assertEquals(response.isAcknowledged(), serialized.isAcknowledged()); - assertNull(serialized.index()); - } - } - } - public void testToXContent() { CreateIndexResponse response = new CreateIndexResponse(true, false, "index_name"); String output = Strings.toString(response); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java index 86c2b67be9c54..5243ffd33b39c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java @@ -19,20 +19,14 @@ package org.elasticsearch.action.admin.indices.mapping.put; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.common.xcontent.yaml.YamlXContent; import org.elasticsearch.index.Index; import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.test.ESTestCase; @@ -87,27 +81,6 @@ public void testBuildFromSimplifiedDef() { assertEquals("mapping source must be pairs of fieldnames and properties definition.", e.getMessage()); } - public void testPutMappingRequestSerialization() throws IOException { - PutMappingRequest request = new PutMappingRequest("foo"); - String mapping = Strings.toString(YamlXContent.contentBuilder().startObject().field("foo", "bar").endObject()); - request.source(mapping, XContentType.YAML); - assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), request.source()); - - final Version version = randomFrom(Version.CURRENT, Version.V_5_3_0, Version.V_5_3_1, Version.V_5_3_2, Version.V_5_4_0); - try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { - bytesStreamOutput.setVersion(version); - request.writeTo(bytesStreamOutput); - try (StreamInput in = StreamInput.wrap(bytesStreamOutput.bytes().toBytesRef().bytes)) { - in.setVersion(version); - PutMappingRequest 
serialized = new PutMappingRequest(); - serialized.readFrom(in); - - String source = serialized.source(); - assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), source); - } - } - } - public void testToXContent() throws IOException { PutMappingRequest request = new PutMappingRequest("foo"); request.type("my_type"); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java index c21e6b3c225f0..2d037d7c024d5 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java @@ -18,25 +18,16 @@ */ package org.elasticsearch.action.admin.indices.template.put; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.common.xcontent.yaml.YamlXContent; import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Arrays; -import java.util.Base64; import java.util.Collections; import static org.hamcrest.Matchers.containsString; @@ -46,81 +37,6 @@ import static org.hamcrest.core.Is.is; public class PutIndexTemplateRequestTests extends AbstractXContentTestCase { - - // bwc for #21009 - public void testPutIndexTemplateRequest510() throws IOException { - PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest("test"); - putRequest.patterns(Collections.singletonList("test*")); - putRequest.order(5); - - PutIndexTemplateRequest multiPatternRequest = new PutIndexTemplateRequest("test"); - multiPatternRequest.patterns(Arrays.asList("test*", "*test2", "*test3*")); - multiPatternRequest.order(5); - - // These bytes were retrieved by Base64 encoding the result of the above with 5_0_0 code. - // Note: Instead of a list for the template, in 5_0_0 the element was provided as a string. - String putRequestBytes = "ADwDAAR0ZXN0BXRlc3QqAAAABQAAAAAAAA=="; - BytesArray bytes = new BytesArray(Base64.getDecoder().decode(putRequestBytes)); - - try (StreamInput in = bytes.streamInput()) { - in.setVersion(Version.V_5_0_0); - PutIndexTemplateRequest readRequest = new PutIndexTemplateRequest(); - readRequest.readFrom(in); - assertEquals(putRequest.patterns(), readRequest.patterns()); - assertEquals(putRequest.order(), readRequest.order()); - - BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(Version.V_5_0_0); - readRequest.writeTo(output); - assertEquals(bytes.toBytesRef(), output.bytes().toBytesRef()); - - // test that multi templates are reverse-compatible. - // for the bwc case, if multiple patterns, use only the first pattern seen. 
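        // Editorial sketch: the assertion that follows (the multi-pattern request written
        // at V_5_0_0 must equal the single-pattern bytes) relies on a wire down-conversion
        // for peers that predate multi-pattern templates, roughly of this shape (the
        // version cut-off and field names here are assumptions, not from this patch):
        //
        //     public void writeTo(StreamOutput out) throws IOException {
        //         if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
        //             out.writeString(patterns.get(0)); // 5.0.0 peers expect one string
        //         } else {
        //             out.writeStringList(patterns);
        //         }
        //     }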
- output.reset(); - multiPatternRequest.writeTo(output); - assertEquals(bytes.toBytesRef(), output.bytes().toBytesRef()); - } - } - - public void testPutIndexTemplateRequestSerializationXContent() throws IOException { - PutIndexTemplateRequest request = new PutIndexTemplateRequest("foo"); - String mapping = Strings.toString(YamlXContent.contentBuilder().startObject().field("foo", "bar").endObject()); - request.patterns(Collections.singletonList("foo")); - request.mapping("bar", mapping, XContentType.YAML); - assertNotEquals(mapping, request.mappings().get("bar")); - assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), request.mappings().get("bar")); - - final Version version = randomFrom(Version.CURRENT, Version.V_5_3_0, Version.V_5_3_1, Version.V_5_3_2, Version.V_5_4_0); - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(version); - request.writeTo(out); - - try (StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes)) { - in.setVersion(version); - PutIndexTemplateRequest serialized = new PutIndexTemplateRequest(); - serialized.readFrom(in); - assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), - serialized.mappings().get("bar")); - } - } - } - - public void testPutIndexTemplateRequestSerializationXContentBwc() throws IOException { - final byte[] data = Base64.getDecoder().decode("ADwDAANmb28IdGVtcGxhdGUAAAAAAAABA2Jhcg8tLS0KZm9vOiAiYmFyIgoAAAAAAAAAAAAAAAA="); - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - PutIndexTemplateRequest request = new PutIndexTemplateRequest(); - request.readFrom(in); - String mapping = Strings.toString(YamlXContent.contentBuilder().startObject().field("foo", "bar").endObject()); - assertNotEquals(mapping, request.mappings().get("bar")); - assertEquals(XContentHelper.convertToJson(new BytesArray(mapping), false, XContentType.YAML), request.mappings().get("bar")); - assertEquals("foo", request.name()); - assertEquals("template", request.patterns().get(0)); - } - } - public void testValidateErrorMessage() throws Exception { PutIndexTemplateRequest request = new PutIndexTemplateRequest(); ActionRequestValidationException withoutNameAndPattern = request.validate(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 4c0dacc8a6e73..8b68d2b6bb9bd 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.ingest.IngestService; -import org.elasticsearch.ingest.PipelineExecutionService; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -90,9 +89,6 @@ public class TransportBulkActionIngestTests extends ESTestCase { ClusterService clusterService; IngestService ingestService; - /** The ingest execution service we can capture calls to */ - PipelineExecutionService executionService; - /** Arguments to callbacks we want to capture, but which require generics, so we must use @Captor */ @Captor 
ArgumentCaptor> failureHandler; @@ -207,8 +203,6 @@ public void setupAction() { }).when(clusterService).addStateApplier(any(ClusterStateApplier.class)); // setup the mocked ingest service for capturing calls ingestService = mock(IngestService.class); - executionService = mock(PipelineExecutionService.class); - when(ingestService.getPipelineExecutionService()).thenReturn(executionService); action = new TestTransportBulkAction(); singleItemBulkWriteAction = new TestSingleItemBulkWriteAction(action); reset(transportService); // call on construction of action @@ -265,7 +259,7 @@ public void testIngestLocal() throws Exception { assertFalse(action.isExecuted); // haven't executed yet assertFalse(responseCalled.get()); assertFalse(failureCalled.get()); - verify(executionService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); + verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); completionHandler.getValue().accept(exception); assertTrue(failureCalled.get()); @@ -299,7 +293,7 @@ public void testSingleItemBulkActionIngestLocal() throws Exception { assertFalse(action.isExecuted); // haven't executed yet assertFalse(responseCalled.get()); assertFalse(failureCalled.get()); - verify(executionService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); + verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); completionHandler.getValue().accept(exception); assertTrue(failureCalled.get()); @@ -331,7 +325,7 @@ public void testIngestForward() throws Exception { action.execute(null, bulkRequest, listener); // should not have executed ingest locally - verify(executionService, never()).executeBulkRequest(any(), any(), any()); + verify(ingestService, never()).executeBulkRequest(any(), any(), any()); // but instead should have sent to a remote node with the transport service ArgumentCaptor node = ArgumentCaptor.forClass(DiscoveryNode.class); verify(transportService).sendRequest(node.capture(), eq(BulkAction.NAME), any(), remoteResponseHandler.capture()); @@ -375,7 +369,7 @@ public void testSingleItemBulkActionIngestForward() throws Exception { singleItemBulkWriteAction.execute(null, indexRequest, listener); // should not have executed ingest locally - verify(executionService, never()).executeBulkRequest(any(), any(), any()); + verify(ingestService, never()).executeBulkRequest(any(), any(), any()); // but instead should have sent to a remote node with the transport service ArgumentCaptor node = ArgumentCaptor.forClass(DiscoveryNode.class); verify(transportService).sendRequest(node.capture(), eq(BulkAction.NAME), any(), remoteResponseHandler.capture()); @@ -423,7 +417,7 @@ public void testUseDefaultPipeline() throws Exception { assertFalse(action.isExecuted); // haven't executed yet assertFalse(responseCalled.get()); assertFalse(failureCalled.get()); - verify(executionService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); + verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); completionHandler.getValue().accept(exception); assertTrue(failureCalled.get()); @@ -455,7 +449,7 @@ public void testCreateIndexBeforeRunPipeline() throws Exception { assertFalse(action.isExecuted); // haven't executed yet assertFalse(responseCalled.get()); assertFalse(failureCalled.get()); - 
verify(executionService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); + verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); completionHandler.getValue().accept(exception); assertTrue(failureCalled.get()); diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java index b0c6d717bb38e..1711d16891083 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java @@ -31,8 +31,8 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.ingest.CompoundProcessor; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.Pipeline; -import org.elasticsearch.ingest.PipelineStore; import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.TestProcessor; import org.elasticsearch.test.ESTestCase; @@ -53,7 +53,7 @@ public class SimulatePipelineRequestParsingTests extends ESTestCase { - private PipelineStore store; + private IngestService ingestService; @Before public void init() throws IOException { @@ -62,9 +62,9 @@ public void init() throws IOException { Pipeline pipeline = new Pipeline(SIMULATED_PIPELINE_ID, null, null, pipelineCompoundProcessor); Map registry = Collections.singletonMap("mock_processor", (factories, tag, config) -> processor); - store = mock(PipelineStore.class); - when(store.get(SIMULATED_PIPELINE_ID)).thenReturn(pipeline); - when(store.getProcessorFactories()).thenReturn(registry); + ingestService = mock(IngestService.class); + when(ingestService.getPipeline(SIMULATED_PIPELINE_ID)).thenReturn(pipeline); + when(ingestService.getProcessorFactories()).thenReturn(registry); } public void testParseUsingPipelineStore() throws Exception { @@ -94,7 +94,8 @@ public void testParseUsingPipelineStore() throws Exception { expectedDocs.add(expectedDoc); } - SimulatePipelineRequest.Parsed actualRequest = SimulatePipelineRequest.parseWithPipelineId(SIMULATED_PIPELINE_ID, requestContent, false, store); + SimulatePipelineRequest.Parsed actualRequest = + SimulatePipelineRequest.parseWithPipelineId(SIMULATED_PIPELINE_ID, requestContent, false, ingestService); assertThat(actualRequest.isVerbose(), equalTo(false)); assertThat(actualRequest.getDocuments().size(), equalTo(numDocs)); Iterator> expectedDocsIterator = expectedDocs.iterator(); @@ -182,7 +183,7 @@ public void testParseWithProvidedPipeline() throws Exception { requestContent.put(Fields.PIPELINE, pipelineConfig); - SimulatePipelineRequest.Parsed actualRequest = SimulatePipelineRequest.parse(requestContent, false, store); + SimulatePipelineRequest.Parsed actualRequest = SimulatePipelineRequest.parse(requestContent, false, ingestService); assertThat(actualRequest.isVerbose(), equalTo(false)); assertThat(actualRequest.getDocuments().size(), equalTo(numDocs)); Iterator> expectedDocsIterator = expectedDocs.iterator(); @@ -208,7 +209,7 @@ public void testNullPipelineId() { List> docs = new ArrayList<>(); requestContent.put(Fields.DOCS, docs); Exception e = expectThrows(IllegalArgumentException.class, - () -> SimulatePipelineRequest.parseWithPipelineId(null, requestContent, false, store)); + () -> SimulatePipelineRequest.parseWithPipelineId(null, 
requestContent, false, ingestService)); assertThat(e.getMessage(), equalTo("param [pipeline] is null")); } @@ -218,7 +219,7 @@ public void testNonExistentPipelineId() { List> docs = new ArrayList<>(); requestContent.put(Fields.DOCS, docs); Exception e = expectThrows(IllegalArgumentException.class, - () -> SimulatePipelineRequest.parseWithPipelineId(pipelineId, requestContent, false, store)); + () -> SimulatePipelineRequest.parseWithPipelineId(pipelineId, requestContent, false, ingestService)); assertThat(e.getMessage(), equalTo("pipeline [" + pipelineId + "] does not exist")); } } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java index 5cd82be8cb04c..53c307c430815 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -28,7 +27,6 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; -import java.util.Base64; import static org.hamcrest.CoreMatchers.equalTo; @@ -68,22 +66,4 @@ public void testSerializationWithXContent() throws IOException { assertEquals(XContentType.JSON, serialized.getXContentType()); assertEquals("{}", serialized.getSource().utf8ToString()); } - - public void testSerializationWithXContentBwc() throws IOException { - final byte[] data = Base64.getDecoder().decode("AAAAAnt9AAA="); - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - SimulatePipelineRequest request = new SimulatePipelineRequest(in); - assertEquals(XContentType.JSON, request.getXContentType()); - assertEquals("{}", request.getSource().utf8ToString()); - - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(version); - request.writeTo(out); - assertArrayEquals(data, out.bytes().toBytesRef().bytes); - } - } - } } diff --git a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 8b1741967734c..50bbad16ab73b 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -33,7 +33,6 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.transport.Transport; import java.io.IOException; @@ -110,17 +109,6 @@ public void run() throws IOException { } } - public void testOldNodesTriggerException() { - SearchTransportService searchTransportService = new SearchTransportService( - Settings.builder().put("search.remote.connect", false).build(), null, null); - DiscoveryNode node = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), VersionUtils.randomVersionBetween(random(), - VersionUtils.getFirstVersion(), 
VersionUtils.getPreviousVersion(Version.V_5_6_0))); - SearchAsyncActionTests.MockConnection mockConnection = new SearchAsyncActionTests.MockConnection(node); - IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, - () -> searchTransportService.sendCanMatch(mockConnection, null, null, null)); - assertEquals("can_match is not supported on pre 5.6 nodes", illegalArgumentException.getMessage()); - } - public void testFilterWithFailure() throws InterruptedException { final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, System.nanoTime(), System::nanoTime); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java index 87e66477a0411..feb5ef50795dc 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -47,13 +46,11 @@ import org.elasticsearch.search.suggest.SuggestTests; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalAggregationTestCase; -import org.elasticsearch.test.VersionUtils; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; -import java.util.Base64; import java.util.Collections; import java.util.List; @@ -290,27 +287,4 @@ public void testSerialization() throws IOException { assertEquals(searchResponse.getClusters(), serialized.getClusters()); } } - - public void testSerializationBwc() throws IOException { - final byte[] data = Base64.getDecoder().decode("AAAAAAAAAAAAAgABBQUAAAoAAAAAAAAA"); - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_6_5, Version.V_6_0_0); - try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(data), namedWriteableRegistry)) { - in.setVersion(version); - SearchResponse deserialized = new SearchResponse(); - deserialized.readFrom(in); - assertSame(SearchResponse.Clusters.EMPTY, deserialized.getClusters()); - - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(version); - deserialized.writeTo(out); - try (StreamInput in2 = new NamedWriteableAwareStreamInput(StreamInput.wrap(out.bytes().toBytesRef().bytes), - namedWriteableRegistry)) { - in2.setVersion(version); - SearchResponse deserialized2 = new SearchResponse(); - deserialized2.readFrom(in2); - assertSame(SearchResponse.Clusters.EMPTY, deserialized2.getClusters()); - } - } - } - } } diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java index a16a8f628f98b..216c1802956e8 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java @@ -36,14 +36,11 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; -import org.elasticsearch.Version; import org.elasticsearch.action.termvectors.TermVectorsRequest.Flag; import org.elasticsearch.common.bytes.BytesArray; 
import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -60,7 +57,6 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.Arrays; -import java.util.Base64; import java.util.EnumSet; import java.util.HashSet; import java.util.Set; @@ -264,34 +260,6 @@ public void testStreamRequest() throws IOException { } } - public void testStreamRequestWithXContentBwc() throws IOException { - final byte[] data = Base64.getDecoder().decode("AAABBWluZGV4BHR5cGUCaWQBAnt9AAABDnNvbWVQcmVmZXJlbmNlFgAAAAEA//////////0AAAA="); - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - TermVectorsRequest request = new TermVectorsRequest(); - request.readFrom(in); - assertEquals("index", request.index()); - assertEquals("type", request.type()); - assertEquals("id", request.id()); - assertTrue(request.offsets()); - assertFalse(request.fieldStatistics()); - assertTrue(request.payloads()); - assertFalse(request.positions()); - assertTrue(request.termStatistics()); - assertEquals("somePreference", request.preference()); - assertEquals("{}", request.doc().utf8ToString()); - assertEquals(XContentType.JSON, request.xContentType()); - - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(version); - request.writeTo(out); - assertArrayEquals(data, out.bytes().toBytesRef().bytes); - } - } - } - public void testFieldTypeToTermVectorString() throws Exception { FieldType ft = new FieldType(); ft.setStoreTermVectorOffsets(false); diff --git a/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java b/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java index 8180dd96e8e18..1e18135f4eb72 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java @@ -52,7 +52,7 @@ public class BootstrapChecksTests extends ESTestCase { - private static final BootstrapContext defaultContext = new BootstrapContext(Settings.EMPTY, MetaData.EMPTY_META_DATA); + static final BootstrapContext defaultContext = new BootstrapContext(Settings.EMPTY, MetaData.EMPTY_META_DATA); public void testNonProductionMode() throws NodeValidationException { // nothing should happen since we are in non-production mode @@ -356,31 +356,6 @@ long getRlimInfinity() { BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)); } - public void testMaxMapCountCheck() throws NodeValidationException { - final int limit = 1 << 18; - final AtomicLong maxMapCount = new AtomicLong(randomIntBetween(1, limit - 1)); - final BootstrapChecks.MaxMapCountCheck check = new BootstrapChecks.MaxMapCountCheck() { - @Override - long getMaxMapCount() { - return maxMapCount.get(); - } - }; - - final NodeValidationException e = expectThrows( - NodeValidationException.class, - () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check))); - assertThat(e.getMessage(), containsString("max 
virtual memory areas vm.max_map_count")); - - maxMapCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE)); - - BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)); - - // nothing should happen if current vm.max_map_count is not - // available - maxMapCount.set(-1); - BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)); - } - public void testClientJvmCheck() throws NodeValidationException { final AtomicReference vmName = new AtomicReference<>("Java HotSpot(TM) 32-Bit Client VM"); final BootstrapCheck check = new BootstrapChecks.ClientJvmCheck() { diff --git a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java index c5b99a91ffa3b..9a964a97bd73c 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java @@ -24,16 +24,21 @@ import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.Constants; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import java.io.BufferedReader; import java.io.IOException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Predicate; import static org.hamcrest.CoreMatchers.equalTo; @@ -45,6 +50,66 @@ public class MaxMapCountCheckTests extends ESTestCase { + // initialize as if the max map count is under the limit, tests can override by setting maxMapCount before executing the check + private final AtomicLong maxMapCount = new AtomicLong(randomIntBetween(1, Math.toIntExact(BootstrapChecks.MaxMapCountCheck.LIMIT) - 1)); + private final BootstrapChecks.MaxMapCountCheck check = new BootstrapChecks.MaxMapCountCheck() { + @Override + long getMaxMapCount() { + return maxMapCount.get(); + } + }; + + private void assertFailure(final BootstrapCheck.BootstrapCheckResult result) { + assertTrue(result.isFailure()); + assertThat( + result.getMessage(), + equalTo( + "max virtual memory areas vm.max_map_count [" + maxMapCount.get() + "] is too low, " + + "increase to at least [" + BootstrapChecks.MaxMapCountCheck.LIMIT + "]")); + } + + public void testMaxMapCountCheckBelowLimit() { + assertFailure(check.check(BootstrapChecksTests.defaultContext)); + } + + public void testMaxMapCountCheckBelowLimitAndMemoryMapAllowed() { + /* + * There are two ways that memory maps are allowed: + * - by default + * - mmapfs is explicitly allowed + * We want to test that if mmapfs is allowed then the max map count check is enforced. 
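     * (Editorial sketch: given the setting exercised below, the production check is
     * presumably gated roughly like
     *
     *     if (context.settings.getAsBoolean("node.store.allow_mmapfs", true) == false) {
     *         return BootstrapCheckResult.success(); // mmap disabled, limit irrelevant
     *     }
     *     return getMaxMapCount() != -1 && getMaxMapCount() < LIMIT
     *             ? BootstrapCheckResult.failure(message) : BootstrapCheckResult.success();
     *
     * so both mmapfs-allowing settings must still trip the failure, while the
     * allow_mmapfs=false case in the next test must pass.)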
+ */ + final List settingsThatAllowMemoryMap = new ArrayList<>(); + settingsThatAllowMemoryMap.add(Settings.EMPTY); + settingsThatAllowMemoryMap.add(Settings.builder().put("node.store.allow_mmapfs", true).build()); + + for (final Settings settingThatAllowsMemoryMap : settingsThatAllowMemoryMap) { + assertFailure(check.check(new BootstrapContext(settingThatAllowsMemoryMap, MetaData.EMPTY_META_DATA))); + } + } + + public void testMaxMapCountCheckNotEnforcedIfMemoryMapNotAllowed() { + // nothing should happen if current vm.max_map_count is under the limit but mmapfs is not allowed + final Settings settings = Settings.builder().put("node.store.allow_mmapfs", false).build(); + final BootstrapContext context = new BootstrapContext(settings, MetaData.EMPTY_META_DATA); + final BootstrapCheck.BootstrapCheckResult result = check.check(context); + assertTrue(result.isSuccess()); + } + + public void testMaxMapCountCheckAboveLimit() { + // nothing should happen if current vm.max_map_count exceeds the limit + maxMapCount.set(randomIntBetween(Math.toIntExact(BootstrapChecks.MaxMapCountCheck.LIMIT) + 1, Integer.MAX_VALUE)); + final BootstrapCheck.BootstrapCheckResult result = check.check(BootstrapChecksTests.defaultContext); + assertTrue(result.isSuccess()); + } + + public void testMaxMapCountCheckMaxMapCountNotAvailable() { + // nothing should happen if current vm.max_map_count is not available + maxMapCount.set(-1); + final BootstrapCheck.BootstrapCheckResult result = check.check(BootstrapChecksTests.defaultContext); + assertTrue(result.isSuccess()); + } + public void testGetMaxMapCountOnLinux() { if (Constants.LINUX) { final BootstrapChecks.MaxMapCountCheck check = new BootstrapChecks.MaxMapCountCheck(); @@ -142,7 +207,7 @@ private ParameterizedMessageLoggingExpectation( } @Override - public void match(LogEvent event) { + public void match(final LogEvent event) { if (event.getLevel().equals(level) && event.getLoggerName().equals(loggerName) && event.getMessage() instanceof ParameterizedMessage) { diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java index afc6b47483eba..9baf2e1c95644 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java @@ -379,10 +379,10 @@ public void testSniffNodesSamplerClosesConnections() throws Exception { transportClientNodesService.addTransportAddresses(remoteService.getLocalDiscoNode().getAddress()); assertEquals(1, transportClientNodesService.connectedNodes().size()); - assertEquals(1, clientService.connectionManager().connectedNodeCount()); + assertEquals(1, clientService.connectionManager().size()); transportClientNodesService.doSample(); - assertEquals(1, clientService.connectionManager().connectedNodeCount()); + assertEquals(1, clientService.connectionManager().size()); establishedConnections.clear(); handler.blockRequest(); diff --git a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index 473f5152e8fd7..b8c39c48f8870 100644 --- a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -134,7 +134,7 @@ public void testReconnect() { private void 
assertConnectedExactlyToNodes(ClusterState state) { assertConnected(state.nodes()); - assertThat(transportService.getConnectionManager().connectedNodeCount(), equalTo(state.nodes().getSize())); + assertThat(transportService.getConnectionManager().size(), equalTo(state.nodes().getSize())); } private void assertConnected(Iterable nodes) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java index 6d489f5feb314..c98587c4cc63f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java @@ -18,12 +18,9 @@ */ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -35,62 +32,15 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; import java.util.Arrays; -import java.util.Base64; import java.util.Collections; import static java.util.Collections.singletonMap; -import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.contains; public class IndexTemplateMetaDataTests extends ESTestCase { - // bwc for #21009 - public void testIndexTemplateMetaData510() throws IOException { - IndexTemplateMetaData metaData = IndexTemplateMetaData.builder("foo") - .patterns(Collections.singletonList("bar")) - .order(1) - .settings(Settings.builder() - .put("setting1", "value1") - .put("setting2", "value2")) - .putAlias(newAliasMetaDataBuilder("alias-bar1")).build(); - - IndexTemplateMetaData multiMetaData = IndexTemplateMetaData.builder("foo") - .patterns(Arrays.asList("bar", "foo")) - .order(1) - .settings(Settings.builder() - .put("setting1", "value1") - .put("setting2", "value2")) - .putAlias(newAliasMetaDataBuilder("alias-bar1")).build(); - - // These bytes were retrieved by Base64 encoding the result of the above with 5_0_0 code - String templateBytes = "A2ZvbwAAAAEDYmFyAghzZXR0aW5nMQEGdmFsdWUxCHNldHRpbmcyAQZ2YWx1ZTIAAQphbGlhcy1iYXIxAAAAAAA="; - BytesArray bytes = new BytesArray(Base64.getDecoder().decode(templateBytes)); - - try (StreamInput in = bytes.streamInput()) { - in.setVersion(Version.V_5_0_0); - IndexTemplateMetaData readMetaData = IndexTemplateMetaData.readFrom(in); - assertEquals(0, in.available()); - assertEquals(metaData.getName(), readMetaData.getName()); - assertEquals(metaData.getPatterns(), readMetaData.getPatterns()); - assertTrue(metaData.aliases().containsKey("alias-bar1")); - assertEquals(1, metaData.aliases().size()); - - BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(Version.V_5_0_0); - readMetaData.writeTo(output); - assertEquals(bytes.toBytesRef(), output.bytes().toBytesRef()); - - // test that multi templates are reverse-compatible. - // for the bwc case, if multiple patterns, use only the first pattern seen. 
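        // Editorial note: this test, like the other removed *Bwc tests in this patch
        // (PutIndexTemplateRequest, SimulatePipelineRequest, TermVectorsRequest,
        // SearchResponse), followed one snapshot pattern: decode a Base64 blob captured
        // from a 5.x node, read it with the old wire version set, write it back at the
        // same version, and assert the round trip is byte-identical, e.g.
        //
        //     try (StreamInput in = bytes.streamInput()) {
        //         in.setVersion(Version.V_5_0_0);        // read as the old node wrote it
        //         T value = read(in);                    // T and read/write stand in for
        //         BytesStreamOutput out = new BytesStreamOutput();  // the per-test types
        //         out.setVersion(Version.V_5_0_0);
        //         write(value, out);
        //         assertEquals(bytes.toBytesRef(), out.bytes().toBytesRef());
        //     }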
- output.reset(); - multiMetaData.writeTo(output); - assertEquals(bytes.toBytesRef(), output.bytes().toBytesRef()); - } - } - public void testIndexTemplateMetaDataXContentRoundTrip() throws Exception { ToXContent.Params params = new ToXContent.MapParams(singletonMap("reduce_mappings", "true")); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java index e329e70134c0c..c1e341fd5bc2f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java @@ -147,7 +147,7 @@ public static IndexMetaData newIndexMeta(String name, Settings indexSettings) { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_CREATION_DATE, 1) .put(IndexMetaData.SETTING_INDEX_UUID, "BOOM") - .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_5_0_0_beta1) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_6_0_0_alpha1) .put(indexSettings) .build(); return IndexMetaData.builder(name).settings(build).build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 8038d9b5e18de..d4645208071a3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -228,7 +228,7 @@ protected DiscoveryNode createNode(DiscoveryNode.Role... mustHaveRoles) { } final String id = String.format(Locale.ROOT, "node_%03d", nodeIdGenerator.incrementAndGet()); return new DiscoveryNode(id, id, buildNewFakeTransportAddress(), Collections.emptyMap(), roles, - VersionUtils.randomVersionBetween(random(), Version.V_5_6_0, null)); + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1, null)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 1fa1ff3a154af..787789d410ff9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -576,7 +576,7 @@ public void testReplicaOnNewestVersionIsPromoted() { // add a single node clusterState = ClusterState.builder(clusterState).nodes( DiscoveryNodes.builder() - .add(newNode("node1-5.x", Version.V_5_6_0))) + .add(newNode("node1-5.x", Version.fromId(5060099)))) .build(); clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); @@ -590,7 +590,7 @@ public void testReplicaOnNewestVersionIsPromoted() { // add another 5.6 node clusterState = ClusterState.builder(clusterState).nodes( DiscoveryNodes.builder(clusterState.nodes()) - .add(newNode("node2-5.x", Version.V_5_6_0))) + .add(newNode("node2-5.x", Version.fromId(5060099)))) .build(); // start the shards, should have 1 primary and 1 replica available diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java 
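(Editorial aside before the next file: the Version.fromId(...) literals that replace
deleted constants throughout these hunks pack major/minor/revision/build into two
digits each, with build 99 denoting a GA release and low builds denoting prereleases;
both examples below are confirmed by assertions elsewhere in this patch:)

    assert Version.fromId(5060099).toString().equals("5.6.0");        // 05 06 00 99: 5.6.0 GA
    assert Version.fromId(5000001).toString().equals("5.0.0-alpha1"); // build 01: alpha1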
b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java index 2022ecb945ba0..536e3cbb7e08d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing.allocation; import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.shrink.ResizeAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -39,7 +38,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.gateway.TestGatewayAllocator; import java.util.Arrays; @@ -243,46 +241,4 @@ public void testSourcePrimaryActive() { routingAllocation).getExplanation()); } } - - public void testAllocateOnOldNode() { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, - VersionUtils.getPreviousVersion(ResizeAction.COMPATIBILITY_VERSION)); - ClusterState clusterState = createInitialClusterState(true, version); - MetaData.Builder metaBuilder = MetaData.builder(clusterState.metaData()); - metaBuilder.put(IndexMetaData.builder("target").settings(settings(Version.CURRENT) - .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME.getKey(), "source") - .put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, IndexMetaData.INDEX_UUID_NA_VALUE)) - .numberOfShards(4).numberOfReplicas(0)); - MetaData metaData = metaBuilder.build(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable()); - routingTableBuilder.addAsNew(metaData.index("target")); - - clusterState = ClusterState.builder(clusterState) - .routingTable(routingTableBuilder.build()) - .metaData(metaData).build(); - Index idx = clusterState.metaData().index("target").getIndex(); - - - ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); - RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); - int shardId = randomIntBetween(0, 3); - int sourceShardId = IndexMetaData.selectSplitShard(shardId, clusterState.metaData().index("source"), 4).id(); - ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, RecoverySource - .LocalShardsRecoverySource.INSTANCE, ShardRoutingState.UNASSIGNED); - assertEquals(Decision.YES, resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); - - assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), - routingAllocation)); - assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node2"), - routingAllocation)); - - routingAllocation.debugDecision(true); - assertEquals("source primary is active", resizeAllocationDecider.canAllocate(shardRouting, routingAllocation).getExplanation()); - assertEquals("node [node1] is too old to split a shard", - resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), - routingAllocation).getExplanation()); - assertEquals("node [node2] is too old to split a shard", - resizeAllocationDecider.canAllocate(shardRouting, 
clusterState.getRoutingNodes().node("node2"), - routingAllocation).getExplanation()); - } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java index 87f98389231e4..b4a24cfc4fcd0 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java @@ -98,6 +98,11 @@ public void testLongGeohashes() { } } + public void testNorthPoleBoundingBox() { + Rectangle bbox = GeoHashUtils.bbox("zzbxfpgzupbx"); // Bounding box with maximum precision touching north pole + assertEquals(90.0, bbox.maxLat, 0.0000001); // Should be 90 degrees + } + public void testInvalidGeohashes() { IllegalArgumentException ex; diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 2376d5663402e..1580c1a379780 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting.Property; @@ -180,6 +181,99 @@ public void testDependentSettings() { service.validate(Settings.builder().put("foo.test.bar", 7).build(), false); } + public void testTupleAffixUpdateConsumer() { + String prefix = randomAlphaOfLength(3) + "foo."; + String intSuffix = randomAlphaOfLength(3); + String listSuffix = randomAlphaOfLength(4); + Setting.AffixSetting intSetting = Setting.affixKeySetting(prefix, intSuffix, + (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope)); + Setting.AffixSetting> listSetting = Setting.affixKeySetting(prefix, listSuffix, + (k) -> Setting.listSetting(k, Arrays.asList("1"), Integer::parseInt, Property.Dynamic, Property.NodeScope)); + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY,new HashSet<>(Arrays.asList(intSetting, listSetting))); + Map, Integer>> results = new HashMap<>(); + Function listBuilder = g -> (prefix + g + "." + listSuffix); + Function intBuilder = g -> (prefix + g + "." 
diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 2376d5663402e..1580c1a379780 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting.Property; @@ -180,6 +181,99 @@ public void testDependentSettings() { service.validate(Settings.builder().put("foo.test.bar", 7).build(), false); } + public void testTupleAffixUpdateConsumer() { + String prefix = randomAlphaOfLength(3) + "foo."; + String intSuffix = randomAlphaOfLength(3); + String listSuffix = randomAlphaOfLength(4); + Setting.AffixSetting<Integer> intSetting = Setting.affixKeySetting(prefix, intSuffix, + (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope)); + Setting.AffixSetting<List<Integer>> listSetting = Setting.affixKeySetting(prefix, listSuffix, + (k) -> Setting.listSetting(k, Arrays.asList("1"), Integer::parseInt, Property.Dynamic, Property.NodeScope)); + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY,new HashSet<>(Arrays.asList(intSetting, listSetting))); + Map<String, Tuple<List<Integer>, Integer>> results = new HashMap<>(); + Function<String, String> listBuilder = g -> (prefix + g + "." + listSuffix); + Function<String, String> intBuilder = g -> (prefix + g + "." + intSuffix); + String group1 = randomAlphaOfLength(3); + String group2 = randomAlphaOfLength(4); + String group3 = randomAlphaOfLength(5); + BiConsumer<String, Tuple<List<Integer>, Integer>> listConsumer = results::put; + + service.addAffixUpdateConsumer(listSetting, intSetting, listConsumer, (s, k) -> { + if (k.v1().isEmpty() && k.v2() == 2) { + throw new IllegalArgumentException("boom"); + } + }); + assertEquals(0, results.size()); + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putList(listBuilder.apply(group2), "18", "19", "20") + .build()); + assertEquals(2, results.get(group1).v2().intValue()); + assertEquals(7, results.get(group2).v2().intValue()); + assertEquals(Arrays.asList(16, 17), results.get(group1).v1()); + assertEquals(Arrays.asList(18, 19, 20), results.get(group2).v1()); + assertEquals(2, results.size()); + + results.clear(); + + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putNull(listBuilder.apply(group2)) // removed + .build()); + + assertNull(group1 + " wasn't changed", results.get(group1)); + assertEquals(1, results.get(group2).v1().size()); + assertEquals(Arrays.asList(1), results.get(group2).v1()); + assertEquals(7, results.get(group2).v2().intValue()); + assertEquals(1, results.size()); + results.clear(); + + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putList(listBuilder.apply(group3), "5", "6") // added + .build()); + assertNull(group1 + " wasn't changed", results.get(group1)); + assertNull(group2 + " wasn't changed", results.get(group2)); + + assertEquals(2, results.get(group3).v1().size()); + assertEquals(Arrays.asList(5, 6), results.get(group3).v1()); + assertEquals(1, results.get(group3).v2().intValue()); + assertEquals(1, results.size()); + results.clear(); + + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 4) // modified + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1), "16", "17") + .putList(listBuilder.apply(group3), "5", "6") + .build()); + assertNull(group2 + " wasn't changed", results.get(group2)); + assertNull(group3 + " wasn't changed", results.get(group3)); + + assertEquals(2, results.get(group1).v1().size()); + assertEquals(Arrays.asList(16, 17), results.get(group1).v1()); + assertEquals(4, results.get(group1).v2().intValue()); + assertEquals(1, results.size()); + results.clear(); + + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> + service.applySettings(Settings.builder() + .put(intBuilder.apply(group1), 2) // modified to trip validator + .put(intBuilder.apply(group2), 7) + .putList(listBuilder.apply(group1)) // modified to trip validator + .putList(listBuilder.apply(group3), "5", "6") + .build()) + ); + assertEquals("boom", iae.getMessage()); + assertEquals(0, results.size()); + } + public void testAddConsumerAffix() { Setting.AffixSetting<Integer> intSetting = Setting.affixKeySetting("foo.", "bar", (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope)); @@ -893,7 +987,7 @@ public void testInternalIndexSettingsFailsValidation() { public void testInternalIndexSettingsSkipValidation() { final Setting<String> internalIndexSetting = Setting.simpleString("index.internal", Property.InternalIndex, Property.IndexScope); - final IndexScopedSettings indexScopedSettings = + final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(Settings.EMPTY, Collections.singleton(internalIndexSetting)); // nothing should happen, validation should not throw an exception final Settings settings = Settings.builder().put("index.internal", "internal").build();
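Editorial note (not part of the patch): testTupleAffixUpdateConsumer above drives AbstractScopedSettings.addAffixUpdateConsumer, which watches two affix settings that share a namespace ("<prefix><group>.<suffix>") and notifies one consumer per changed group with both values bundled together. The following is a rough sketch of that grouping idea under simplified assumptions: flat string maps instead of Settings, hypothetical names, validation and default handling omitted.

import java.util.*;
import java.util.function.BiConsumer;

final class AffixUpdateSketch {
    // Fires the consumer once per namespace whose pair of values changed between
    // the old and new settings; both values are delivered together.
    static void onSettingsChanged(Map<String, String> oldSettings, Map<String, String> newSettings,
                                  String prefix, String suffixA, String suffixB,
                                  BiConsumer<String, String[]> consumer) {
        Set<String> namespaces = new TreeSet<>();
        for (String key : oldSettings.keySet()) addNamespace(namespaces, key, prefix);
        for (String key : newSettings.keySet()) addNamespace(namespaces, key, prefix);
        for (String ns : namespaces) {
            String keyA = prefix + ns + "." + suffixA;
            String keyB = prefix + ns + "." + suffixB;
            if (!Objects.equals(oldSettings.get(keyA), newSettings.get(keyA))
                    || !Objects.equals(oldSettings.get(keyB), newSettings.get(keyB))) {
                // both values are passed, even when only one of them moved
                consumer.accept(ns, new String[] {newSettings.get(keyA), newSettings.get(keyB)});
            }
        }
    }

    private static void addNamespace(Set<String> namespaces, String key, String prefix) {
        if (key.startsWith(prefix)) {
            String rest = key.substring(prefix.length());
            int dot = rest.indexOf('.');
            if (dot > 0) {
                namespaces.add(rest.substring(0, dot));
            }
        }
    }

    public static void main(String[] args) {
        Map<String, String> before = Map.of("foo.g1.int", "2", "foo.g2.int", "7");
        Map<String, String> after = Map.of("foo.g1.int", "4", "foo.g2.int", "7");
        onSettingsChanged(before, after, "foo.", "int", "list",
                (ns, values) -> System.out.println(ns + " -> " + Arrays.toString(values)));
        // prints: g1 -> [4, null]   (g2 is unchanged, so its consumer never fires)
    }
}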
diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index c32037f44525e..7063e53f7891e 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -757,7 +757,7 @@ public void testTimeValue() { public void testSettingsGroupUpdater() { Setting<Integer> intSetting = Setting.intSetting("prefix.foo", 1, Property.NodeScope, Property.Dynamic); Setting<Integer> intSetting2 = Setting.intSetting("prefix.same", 1, Property.NodeScope, Property.Dynamic); - AbstractScopedSettings.SettingUpdater<Settings> updater = Setting.groupedSettingsUpdater(s -> {}, logger, + AbstractScopedSettings.SettingUpdater<Settings> updater = Setting.groupedSettingsUpdater(s -> {}, Arrays.asList(intSetting, intSetting2)); Settings current = Settings.builder().put("prefix.foo", 123).put("prefix.same", 5555).build(); @@ -768,7 +768,7 @@ public void testSettingsGroupUpdater() { public void testSettingsGroupUpdaterRemoval() { Setting<Integer> intSetting = Setting.intSetting("prefix.foo", 1, Property.NodeScope, Property.Dynamic); Setting<Integer> intSetting2 = Setting.intSetting("prefix.same", 1, Property.NodeScope, Property.Dynamic); - AbstractScopedSettings.SettingUpdater<Settings> updater = Setting.groupedSettingsUpdater(s -> {}, logger, + AbstractScopedSettings.SettingUpdater<Settings> updater = Setting.groupedSettingsUpdater(s -> {}, Arrays.asList(intSetting, intSetting2)); Settings current = Settings.builder().put("prefix.same", 5555).build(); @@ -783,7 +783,7 @@ public void testSettingsGroupUpdaterWithAffixSetting() { Setting.AffixSetting<String> affixSetting = Setting.affixKeySetting("prefix.foo.", "suffix", key -> Setting.simpleString(key,Property.NodeScope, Property.Dynamic)); - AbstractScopedSettings.SettingUpdater<Settings> updater = Setting.groupedSettingsUpdater(s -> {}, logger, + AbstractScopedSettings.SettingUpdater<Settings> updater = Setting.groupedSettingsUpdater(s -> {}, Arrays.asList(intSetting, prefixKeySetting, affixSetting)); Settings.Builder currentSettingsBuilder = Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java b/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java index e193ea34498cf..feaa7c4a0ae58 100644 --- a/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java +++ b/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.unit; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.hamcrest.MatcherAssert; @@ -319,9 +318,4 @@ public void testGetBytesAsInt() { } } } - - public void testOldSerialisation() throws IOException { - ByteSizeValue original = createTestInstance(); - assertSerialization(original, randomFrom(Version.V_5_6_4, Version.V_5_6_5, Version.V_6_0_0, Version.V_6_0_1, Version.V_6_1_0)); - } }
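Editorial note (not part of the patch): the SettingTests hunks above drop the logger argument from Setting.groupedSettingsUpdater but keep its contract: one consumer observes a fixed group of settings and fires at most once per update batch, and only when some member of the group actually changed. A minimal sketch of that contract, with hypothetical names and plain string maps standing in for Settings:

import java.util.*;
import java.util.function.Consumer;

final class GroupedUpdaterSketch {
    // Returns true (and invokes the consumer once) iff any key in the group changed.
    static boolean maybeApply(Map<String, String> current, Map<String, String> next,
                              Set<String> groupKeys, Consumer<Map<String, String>> consumer) {
        boolean changed = false;
        for (String key : groupKeys) {
            if (!Objects.equals(current.get(key), next.get(key))) {
                changed = true;
                break;
            }
        }
        if (changed) {
            Map<String, String> view = new HashMap<>();
            for (String key : groupKeys) {
                if (next.containsKey(key)) view.put(key, next.get(key)); // removed keys fall back to defaults elsewhere
            }
            consumer.accept(view); // one notification for the whole group
        }
        return changed;
    }

    public static void main(String[] args) {
        Set<String> group = Set.of("prefix.foo", "prefix.same");
        Map<String, String> cur = Map.of("prefix.foo", "123", "prefix.same", "5555");
        Map<String, String> nxt = Map.of("prefix.same", "5555"); // prefix.foo removed
        System.out.println(maybeApply(cur, nxt, group, g -> System.out.println("apply " + g)));
    }
}

diff --git a/server/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java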
b/server/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java index 76dd8e343a266..dd2627f4bc206 100644 --- a/server/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java @@ -63,7 +63,7 @@ public void testUpgradeCustomDataPath() throws IOException { Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0) .put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) @@ -91,7 +91,7 @@ public void testPartialUpgradeCustomDataPath() throws IOException { Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0) .put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) @@ -129,7 +129,7 @@ public void testUpgrade() throws IOException { Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build(); @@ -153,7 +153,7 @@ public void testUpgradeIndices() throws IOException { Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build(); diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java index 2f4be2fcd5394..3c06838593fb9 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java @@ -80,7 +80,7 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { final Version maxNodeVersion = nodes.getMaxNodeVersion(); final Version minNodeVersion = nodes.getMinNodeVersion(); - if (maxNodeVersion.onOrAfter(Version.V_6_0_0_alpha1)) { + if (maxNodeVersion.onOrAfter(Version.V_7_0_0_alpha1)) { final Version tooLow = getPreviousVersion(maxNodeVersion.minimumCompatibilityVersion()); expectThrows(IllegalStateException.class, () -> { if (randomBoolean()) { @@ -91,7 +91,7 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { }); } - if (minNodeVersion.before(Version.V_5_5_0)) { + if (minNodeVersion.before(Version.V_6_0_0)) { Version tooHigh = incompatibleFutureVersion(minNodeVersion); expectThrows(IllegalStateException.class, () -> { if (randomBoolean()) { @@ -102,7 +102,7 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { }); } - if (minNodeVersion.onOrAfter(Version.V_6_0_0_alpha1)) { 
+ if (minNodeVersion.onOrAfter(Version.V_7_0_0_alpha1)) { Version oldMajor = randomFrom(allVersions().stream().filter(v -> v.major < 6).collect(Collectors.toList())); expectThrows(IllegalStateException.class, () -> MembershipAction.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); } diff --git a/server/src/test/java/org/elasticsearch/get/GetActionIT.java b/server/src/test/java/org/elasticsearch/get/GetActionIT.java index 5ed6b957c78a4..829d6ff7c1458 100644 --- a/server/src/test/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/test/java/org/elasticsearch/get/GetActionIT.java @@ -528,7 +528,7 @@ public void testGetFieldsMetaDataWithRouting() throws Exception { assertAcked(prepareCreate("test") .addMapping("_doc", "field1", "type=keyword,store=true") .addAlias(new Alias("alias")) - .setSettings(Settings.builder().put("index.refresh_interval", -1).put("index.version.created", Version.V_5_6_0.id))); + .setSettings(Settings.builder().put("index.refresh_interval", -1).put("index.version.created", Version.V_6_0_0.id))); // multi types in 5.6 client().prepareIndex("test", "_doc", "1") diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index a82b932e2b570..000722863887c 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -87,6 +87,8 @@ import java.util.function.Function; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; public class IndexModuleTests extends ESTestCase { @@ -376,6 +378,21 @@ public void testDisableQueryCacheHasPrecedenceOverForceQueryCache() throws IOExc indexService.close("simon says", false); } + public void testMmapfsStoreTypeNotAllowed() { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put("index.store.type", "mmapfs") + .build(); + final Settings nodeSettings = Settings.builder() + .put(IndexModule.NODE_STORE_ALLOW_MMAPFS.getKey(), false) + .build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(new Index("foo", "_na_"), settings, nodeSettings); + final IndexModule module = + new IndexModule(indexSettings, emptyAnalysisRegistry, new InternalEngineFactory(), Collections.emptyMap()); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> newIndexService(module)); + assertThat(e, hasToString(containsString("store type [mmapfs] is not allowed"))); + } + class CustomQueryCache implements QueryCache { @Override diff --git a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java index 78569d927be76..0dcba53df88e7 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java @@ -146,15 +146,4 @@ public void testInvalidMissing() throws IOException { assertThat(exc.getMessage(), containsString("Illegal missing value:[default]," + " must be one of [_last, _first]")); } - - public void testInvalidVersion() throws IOException { - final Settings settings = Settings.builder() - .put("index.sort.field", "field1") - .build(); - IllegalArgumentException exc = - expectThrows(IllegalArgumentException.class, () -> 
indexSettings(settings, Version.V_5_4_0)); - assertThat(exc.getMessage(), - containsString("unsupported index.version.created:5.4.0, " + - "can't set index.sort on versions prior to 6.0.0-alpha1")); - } } diff --git a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index 26a5b87866c21..04dc98deb7bf5 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -103,7 +103,7 @@ public void testOverrideDefaultAnalyzer() throws IOException { } public void testOverrideDefaultIndexAnalyzerIsUnsupported() { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_alpha1, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); AnalyzerProvider<?> defaultIndex = new PreBuiltAnalyzerProvider("default_index", AnalyzerScope.INDEX, new EnglishAnalyzer()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index 9aba48f7de55b..33ec090c61e01 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -56,21 +56,21 @@ public void testThatDefaultAndStandardAnalyzerAreTheSameInstance() { public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() { assertThat(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT), - is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_5_0_0))); + is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_6_0_0))); } public void testThatInstancesAreCachedAndReused() { assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT), PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT)); // same es version should be cached - assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_2_1), - PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_2_1)); - assertNotSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_0_0), - PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_5_0_1)); + assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_2_1), + PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_2_1)); + assertNotSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_0_0), + PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_0_1)); // Same Lucene version should be cached: - assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_5_2_1), - PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_5_2_2)); + assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_1), + PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_2)); } public void testThatAnalyzersAreUsedInMapping() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 123c65da46cc1..4ecc7f6000d21 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -661,7 +661,7 @@ public IndexSearcher wrap(IndexSearcher searcher) throws
EngineException { trimUnsafeCommits(engine.config()); engine = new InternalEngine(engine.config()); assertTrue(engine.isRecovering()); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); Engine.Searcher searcher = wrapper.wrap(engine.acquireSearcher("test")); assertThat(counter.get(), equalTo(2)); searcher.close(); @@ -678,7 +678,7 @@ public void testFlushIsDisabledDuringTranslogRecovery() throws IOException { engine = new InternalEngine(engine.config()); expectThrows(IllegalStateException.class, () -> engine.flush(true, true)); assertTrue(engine.isRecovering()); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertFalse(engine.isRecovering()); doc = testParsedDocument("2", null, testDocumentWithTextField(), SOURCE, null); engine.index(indexForDoc(doc)); @@ -708,7 +708,7 @@ public void testTranslogMultipleOperationsSameDocument() throws IOException { } trimUnsafeCommits(engine.config()); try (Engine recoveringEngine = new InternalEngine(engine.config())){ - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { final TotalHitCountCollector collector = new TotalHitCountCollector(); searcher.searcher().search(new MatchAllDocsQuery(), collector); @@ -744,7 +744,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s } }; assertThat(getTranslog(recoveringEngine).stats().getUncommittedOperations(), equalTo(docs)); - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); assertTrue(committed.get()); } finally { IOUtils.close(recoveringEngine); @@ -778,7 +778,7 @@ public void testTranslogRecoveryWithMultipleGenerations() throws IOException { initialEngine.close(); trimUnsafeCommits(initialEngine.config()); recoveringEngine = new InternalEngine(initialEngine.config()); - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), docs); assertEquals(docs, topDocs.totalHits); @@ -788,6 +788,43 @@ public void testTranslogRecoveryWithMultipleGenerations() throws IOException { } } + public void testRecoveryFromTranslogUpToSeqNo() throws IOException { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + final long maxSeqNo; + try (InternalEngine engine = createEngine(config)) { + final int docs = randomIntBetween(1, 100); + for (int i = 0; i < docs; i++) { + final String id = Integer.toString(i); + final ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null); + engine.index(indexForDoc(doc)); + if (rarely()) { + engine.rollTranslogGeneration(); + } else if (rarely()) { + engine.flush(randomBoolean(), true); + } + } + maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo(); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + engine.syncTranslog(); + } + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + engine.recoverFromTranslog(Long.MAX_VALUE); + assertThat(engine.getLocalCheckpoint(), equalTo(maxSeqNo)); + 
assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(maxSeqNo)); + } + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + long upToSeqNo = randomLongBetween(globalCheckpoint.get(), maxSeqNo); + engine.recoverFromTranslog(upToSeqNo); + assertThat(engine.getLocalCheckpoint(), equalTo(upToSeqNo)); + assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(upToSeqNo)); + } + } + } + public void testConcurrentGetAndFlush() throws Exception { ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); engine.index(indexForDoc(doc)); @@ -1165,7 +1202,7 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException { } trimUnsafeCommits(config); engine = new InternalEngine(config); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); } @@ -1184,7 +1221,7 @@ public void testSyncedFlushVanishesOnReplay() throws IOException { engine.close(); trimUnsafeCommits(config); engine = new InternalEngine(config); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID)); } @@ -2150,7 +2187,7 @@ public void testSeqNoAndCheckpoints() throws IOException { trimUnsafeCommits(initialEngine.engineConfig); try (InternalEngine recoveringEngine = new InternalEngine(initialEngine.config())){ - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); assertEquals(primarySeqNo, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); assertThat( @@ -2471,7 +2508,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { try (InternalEngine engine = createEngine(config)) { engine.index(firstIndexRequest); globalCheckpoint.set(engine.getLocalCheckpoint()); - expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); + expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog(Long.MAX_VALUE)); Map<String, String> userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); @@ -2493,7 +2530,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); } assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); @@ -2510,7 +2547,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { Map<String, String> userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertEquals(2, engine.getTranslog().currentFileGeneration()); assertEquals(0L, engine.getTranslog().stats().getUncommittedOperations()); } @@ -2524,7 +2561,7 @@ public void
testCurrentTranslogIDisCommitted() throws IOException { Map<String, String> userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("no changes - nothing to commit", "1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); @@ -2630,7 +2667,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s } } }) { - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); engine.index(indexForDoc(doc1)); globalCheckpoint.set(engine.getLocalCheckpoint()); @@ -2641,7 +2678,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s try (InternalEngine engine = new InternalEngine(config(indexSettings, store, translogPath, newMergePolicy(), null, null, globalCheckpointSupplier))) { - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertVisibleCount(engine, 1); final long committedGen = Long.valueOf( engine.getLastCommittedSegmentInfos().getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); @@ -2709,7 +2746,7 @@ public void testTranslogReplay() throws IOException { engine.close(); trimUnsafeCommits(copy(engine.config(), inSyncGlobalCheckpointSupplier)); engine = new InternalEngine(copy(engine.config(), inSyncGlobalCheckpointSupplier)); // we need to reuse the engine config unless the parser.mappingModified won't work - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertVisibleCount(engine, numDocs, false); parser = (TranslogHandler) engine.config().getTranslogRecoveryRunner(); @@ -3418,7 +3455,7 @@ public void testEngineMaxTimestampIsInitialized() throws IOException { } try (Store store = createStore(newFSDirectory(storeDir)); Engine engine = new InternalEngine(configSupplier.apply(store))) { assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); assertEquals(timestamp1, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); @@ -3701,7 +3738,7 @@ public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws Bro } trimUnsafeCommits(initialEngine.config()); try (Engine recoveringEngine = new InternalEngine(initialEngine.config())) { - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); recoveringEngine.fillSeqNoGaps(2); assertThat(recoveringEngine.getLocalCheckpoint(), greaterThanOrEqualTo((long) (docs - 1))); } @@ -3812,7 +3849,7 @@ protected long doGenerateSeqNoForOperation(Operation operation) { throw new UnsupportedOperationException(); } }; - noOpEngine.recoverFromTranslog(); + noOpEngine.recoverFromTranslog(Long.MAX_VALUE); final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get()); final String reason = "filling gaps"; noOpEngine.noOp(new Engine.NoOp(maxSeqNo + 1, primaryTerm.get(), LOCAL_TRANSLOG_RECOVERY,
System.nanoTime(), reason)); @@ -4090,7 +4127,7 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { trimUnsafeCommits(copy(replicaEngine.config(), globalCheckpoint::get)); recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get)); assertEquals(numDocsOnReplica, getTranslog(recoveringEngine).stats().getUncommittedOperations()); - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); assertEquals(checkpointOnReplica, recoveringEngine.getLocalCheckpoint()); assertEquals((maxSeqIDOnReplica + 1) - numDocsOnReplica, recoveringEngine.fillSeqNoGaps(2)); @@ -4126,7 +4163,7 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { if (flushed) { assertThat(recoveringEngine.getTranslogStats().getUncommittedOperations(), equalTo(0)); } - recoveringEngine.recoverFromTranslog(); + recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpoint()); assertEquals(0, recoveringEngine.fillSeqNoGaps(3)); @@ -4319,7 +4356,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s super.commitIndexWriter(writer, translog, syncId); } }) { - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); int numDocs = scaledRandomIntBetween(10, 100); for (int docId = 0; docId < numDocs; docId++) { ParseContext.Document document = testDocumentWithTextField();
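Editorial note (not part of the patch): the InternalEngineTests changes above migrate every call site to the new recoverFromTranslog(long) signature, and testRecoveryFromTranslogUpToSeqNo exercises the bound directly. Below is a minimal sketch of the intended semantics with hypothetical names; the real engine also tracks checkpoints contiguously and reports recovery stats. Passing Long.MAX_VALUE reproduces the old replay-everything behaviour, while a finite bound (for example the global checkpoint) stops applying operations above it.

import java.util.List;

final class TranslogReplaySketch {
    static final class Operation {
        final long seqNo;
        Operation(long seqNo) { this.seqNo = seqNo; }
    }

    // Replays ops with seqNo <= recoverUpToSeqNo; returns the highest seqNo applied.
    static long recoverFromTranslog(List<Operation> snapshot, long recoverUpToSeqNo) {
        long localCheckpoint = -1; // no ops performed yet
        for (Operation op : snapshot) {
            if (op.seqNo > recoverUpToSeqNo) {
                continue; // left in the translog, not applied to the engine
            }
            // applyToEngine(op) would go here
            localCheckpoint = Math.max(localCheckpoint, op.seqNo);
        }
        return localCheckpoint;
    }

    public static void main(String[] args) {
        List<Operation> ops = List.of(new Operation(0), new Operation(1), new Operation(2));
        System.out.println(recoverFromTranslog(ops, Long.MAX_VALUE)); // 2 - full recovery
        System.out.println(recoverFromTranslog(ops, 1));              // 1 - bounded recovery
    }
}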
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java index f48603d30515f..a910c2c86bab8 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java @@ -40,18 +40,11 @@ public void testParseUnknownParam() throws Exception { templateDef.put("random_param", "random_value"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1)); + () -> DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1)); assertEquals("Illegal dynamic template parameter: [random_param]", e.getMessage()); } public void testParseUnknownMatchType() { - Map<String, Object> templateDef = new HashMap<>(); - templateDef.put("match_mapping_type", "short"); - templateDef.put("mapping", Collections.singletonMap("store", true)); - // if a wrong match type is specified, we ignore the template - assertNull(DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha5)); - assertWarnings("match_mapping_type [short] is invalid and will be ignored: No field type matched on [short], " + - "possible values are [object, string, long, double, boolean, date, binary]"); Map<String, Object> templateDef2 = new HashMap<>(); templateDef2.put("match_mapping_type", "text"); templateDef2.put("mapping", Collections.singletonMap("store", true)); @@ -79,7 +72,7 @@ public void testMatchAllTemplate() { Map<String, Object> templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "*"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha5); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); assertTrue(template.match("a.b", "b", randomFrom(XContentFieldType.values()))); } @@ -87,7 +80,7 @@ public void testMatchTypeTemplate() { Map<String, Object> templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "string"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha5); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); assertTrue(template.match("a.b", "b", XContentFieldType.STRING)); assertFalse(template.match("a.b", "b", XContentFieldType.BOOLEAN)); } @@ -97,7 +90,7 @@ public void testSerialization() throws Exception { Map<String, Object> templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "string"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); XContentBuilder builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"match_mapping_type\":\"string\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); @@ -107,7 +100,7 @@ public void testSerialization() throws Exception { templateDef.put("match", "*name"); templateDef.put("unmatch", "first_name"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1); + template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"match\":\"*name\",\"unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); @@ -117,7 +110,7 @@ public void testSerialization() throws Exception { templateDef.put("path_match", "*name"); templateDef.put("path_unmatch", "first_name"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1); + template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"path_match\":\"*name\",\"path_unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", @@ -128,7 +121,7 @@ public void testSerialization() throws Exception { templateDef.put("match", "^a$"); templateDef.put("match_pattern", "regex"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.V_5_0_0_alpha1); + template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"match\":\"^a$\",\"match_pattern\":\"regex\",\"mapping\":{\"store\":true}}", Strings.toString(builder));
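Editorial note (not part of the patch): DynamicTemplateTests above serializes templates built from match_mapping_type, match/unmatch and path_match/path_unmatch. As a rough sketch of the matching rules those fields encode (hypothetical helper, not the DynamicTemplate implementation; match_pattern regex support omitted):

final class DynamicTemplateMatchSketch {
    // Simple single-'*' wildcard matching, as in "*name"; a null pattern matches everything.
    static boolean simpleMatch(String pattern, String value) {
        if (pattern == null || pattern.equals("*")) return true;
        int star = pattern.indexOf('*');
        if (star == -1) return pattern.equals(value);
        return value.startsWith(pattern.substring(0, star))
                && value.endsWith(pattern.substring(star + 1));
    }

    // A template applies when match_mapping_type is "*" or equals the detected type,
    // and the field name passes match but not unmatch.
    static boolean templateMatches(String matchMappingType, String match, String unmatch,
                                   String detectedType, String fieldName) {
        if (matchMappingType != null && !matchMappingType.equals("*")
                && !matchMappingType.equals(detectedType)) {
            return false;
        }
        return simpleMatch(match, fieldName) && !(unmatch != null && simpleMatch(unmatch, fieldName));
    }

    public static void main(String[] args) {
        // mirrors the serialized example above: match "*name", unmatch "first_name"
        System.out.println(templateMatches("string", "*name", "first_name", "string", "last_name"));  // true
        System.out.println(templateMatches("string", "*name", "first_name", "string", "first_name")); // false
    }
}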
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java index 8f2a51bbfc2bd..5172e7b0b8839 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java @@ -57,7 +57,7 @@ protected Collection<Class<? extends Plugin>> getPlugins() { } public void testExternalValues() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); @@ -107,7 +107,7 @@ public void testExternalValues() throws Exception { } public void testExternalValuesWithMultifield() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); @@ -173,7 +173,7 @@ public void testExternalValuesWithMultifield() throws Exception { } public void testExternalValuesWithMultifieldTwoLevels() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index 56e587dc995da..8e5c81e58f189 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -321,11 +321,16 @@ public void testBoost() throws IOException { public void testEnableNorms() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "keyword").field("norms", true).endObject().endObject() - .endObject().endObject()); + .startObject("properties") + .startObject("field") + .field("type", "keyword") + .field("doc_values", false) + .field("norms", true) + .endObject() + .endObject() + .endObject().endObject()); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); - assertEquals(mapping, mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference @@ -336,8 +341,11 @@ public void testEnableNorms() throws IOException { XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); - assertEquals(2, fields.length); + assertEquals(1, fields.length); assertFalse(fields[0].fieldType().omitNorms()); + + IndexableField[] fieldNamesFields = doc.rootDoc().getFields(FieldNamesFieldMapper.NAME); + assertEquals(0, fieldNamesFields.length); } public void testNormalizer() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java index a291062c7a5bf..eae5b4ac7d2ab 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper; import
com.carrotsearch.randomizedtesting.generators.RandomStrings; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.TokenFilter; @@ -28,9 +27,11 @@ import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; -import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.Lucene; @@ -132,6 +133,23 @@ public void testTermsQuery() { assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } + public void testExistsQuery() { + MappedFieldType ft = createDefaultFieldType(); + ft.setName("field"); + + ft.setHasDocValues(true); + ft.setOmitNorms(true); + assertEquals(new DocValuesFieldExistsQuery("field"), ft.existsQuery(null)); + + ft.setHasDocValues(false); + ft.setOmitNorms(false); + assertEquals(new NormsFieldExistsQuery("field"), ft.existsQuery(null)); + + ft.setHasDocValues(false); + ft.setOmitNorms(true); + assertEquals(new TermQuery(new Term(FieldNamesFieldMapper.NAME, "field")), ft.existsQuery(null)); + } + public void testRegexpQuery() { MappedFieldType ft = createDefaultFieldType(); ft.setName("field"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java index 0af663219903f..3bec98d33eec7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java @@ -61,7 +61,7 @@ public void testDocValuesSingleType() throws Exception { public void testDocValues(boolean singleType) throws IOException { Settings indexSettings = singleType ? 
Settings.EMPTY : Settings.builder() - .put("index.version.created", Version.V_5_6_0) + .put("index.version.created", Version.V_6_0_0) .build(); MapperService mapperService = createIndex("test", indexSettings).mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index 0de9cac885502..496d8512d4e28 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -30,7 +30,6 @@ import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -366,9 +365,6 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws public void testMatchPhrasePrefixWithBoost() throws Exception { QueryShardContext context = createShardContext(); - assumeTrue("test runs only when the index version is on or after V_5_0_0_alpha1", - context.indexVersionCreated().onOrAfter(Version.V_5_0_0_alpha1)); - { // field boost is applied on a single term query MatchPhrasePrefixQueryBuilder builder = new MatchPhrasePrefixQueryBuilder("string_boost", "foo"); diff --git a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java index 6ac97373dfa1a..72898dd3911cd 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.termvectors.MultiTermVectorsItemResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; @@ -36,13 +35,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lucene.search.MoreLikeThisQuery; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item; @@ -52,7 +49,6 @@ import java.io.IOException; import java.util.Arrays; -import java.util.Base64; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; @@ -333,26 +329,6 @@ public void testItemFromXContent() throws IOException { assertEquals(expectedItem, newItem); } - public void testItemSerializationBwc() throws IOException { - final byte[] data = 
Base64.getDecoder().decode("AQVpbmRleAEEdHlwZQEODXsiZm9vIjoiYmFyIn0A/wD//////////QAAAAAAAAAA"); - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - Item item = new Item(in); - assertEquals(XContentType.JSON, item.xContentType()); - assertEquals("{\"foo\":\"bar\"}", item.doc().utf8ToString()); - assertEquals("index", item.index()); - assertEquals("type", item.type()); - - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(version); - item.writeTo(out); - assertArrayEquals(data, out.bytes().toBytesRef().bytes); - } - } - } - @Override protected boolean isCachable(MoreLikeThisQueryBuilder queryBuilder) { return queryBuilder.likeItems().length == 0; // items are always fetched diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index e30cdaca40204..2f69ef7674d4f 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -21,6 +21,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.queries.ExtendedCommonTermsQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; @@ -381,6 +382,69 @@ public void testDefaultField() throws Exception { assertEquals(expected, query); } + public void testWithStopWords() throws Exception { + Query query = new MultiMatchQueryBuilder("the quick fox") + .field(STRING_FIELD_NAME) + .analyzer("stop") + .toQuery(createShardContext()); + Query expected = new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "fox")), BooleanClause.Occur.SHOULD) + .build(); + assertEquals(expected, query); + + query = new MultiMatchQueryBuilder("the quick fox") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + .toQuery(createShardContext()); + expected = new DisjunctionMaxQuery( + Arrays.asList( + new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "fox")), BooleanClause.Occur.SHOULD) + .build(), + new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME_2, "quick")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME_2, "fox")), BooleanClause.Occur.SHOULD) + .build() + ), 0f); + assertEquals(expected, query); + + query = new MultiMatchQueryBuilder("the") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + .toQuery(createShardContext()); + expected = new DisjunctionMaxQuery(Arrays.asList(new MatchNoDocsQuery(), new MatchNoDocsQuery()), 0f); + assertEquals(expected, query); + + query = new BoolQueryBuilder() + .should( + new MultiMatchQueryBuilder("the") + .field(STRING_FIELD_NAME) + .analyzer("stop") + ) + .toQuery(createShardContext()); + expected = new BooleanQuery.Builder() + .add(new MatchNoDocsQuery(), BooleanClause.Occur.SHOULD) + .build(); + assertEquals(expected, query); + + query = new BoolQueryBuilder() + .should( + new MultiMatchQueryBuilder("the") + 
.field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + ) + .toQuery(createShardContext()); + expected = new BooleanQuery.Builder() + .add(new DisjunctionMaxQuery(Arrays.asList(new MatchNoDocsQuery(), new MatchNoDocsQuery()), 0f), BooleanClause.Occur.SHOULD) + .build(); + assertEquals(expected, query); + } + private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) { Settings build = Settings.builder().put(oldIndexSettings) .put(indexSettings) diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index a2e6018d0ef6b..76479791283b4 100644 --- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -124,10 +124,6 @@ protected void doAssertLuceneQuery(NestedQueryBuilder queryBuilder, Query query, public void testSerializationBWC() throws IOException { for (Version version : VersionUtils.allReleasedVersions()) { NestedQueryBuilder testQuery = createTestQueryBuilder(); - if (version.before(Version.V_5_2_0) && testQuery.innerHit() != null) { - // ignore unmapped for inner_hits has been added on 5.2 - testQuery.innerHit().setIgnoreUnmapped(false); - } assertSerialization(testQuery, version); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 87197b662d142..b0ee32548737a 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -998,6 +998,18 @@ public void testExistsFieldQuery() throws Exception { } else { assertThat(query, equalTo(new ConstantScoreQuery(new TermQuery(new Term("_field_names", STRING_FIELD_NAME))))); } + + for (boolean quoted : new boolean[] {true, false}) { + String value = (quoted ? "\"" : "") + STRING_FIELD_NAME + (quoted ? 
"\"" : ""); + queryBuilder = new QueryStringQueryBuilder("_exists_:" + value); + query = queryBuilder.toQuery(context); + if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) + && (context.fieldMapper(STRING_FIELD_NAME).omitNorms() == false)) { + assertThat(query, equalTo(new ConstantScoreQuery(new NormsFieldExistsQuery(STRING_FIELD_NAME)))); + } else { + assertThat(query, equalTo(new ConstantScoreQuery(new TermQuery(new Term("_field_names", STRING_FIELD_NAME))))); + } + } QueryShardContext contextNoType = createShardContextWithNoType(); query = queryBuilder.toQuery(contextNoType); assertThat(query, equalTo(new MatchNoDocsQuery())); @@ -1266,11 +1278,58 @@ public void testWithStopWords() throws Exception { .field(STRING_FIELD_NAME) .analyzer("stop") .toQuery(createShardContext()); - BooleanQuery expected = new BooleanQuery.Builder() - .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), Occur.SHOULD) - .add(new TermQuery(new Term(STRING_FIELD_NAME, "fox")), Occur.SHOULD) + Query expected = new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "fox")), BooleanClause.Occur.SHOULD) + .build(); + assertEquals(expected, query); + + query = new QueryStringQueryBuilder("the quick fox") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + .toQuery(createShardContext()); + expected = new DisjunctionMaxQuery( + Arrays.asList( + new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "fox")), Occur.SHOULD) + .build(), + new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME_2, "quick")), Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME_2, "fox")), Occur.SHOULD) + .build() + ), 0f); + assertEquals(expected, query); + + query = new QueryStringQueryBuilder("the") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + .toQuery(createShardContext()); + assertEquals(new BooleanQuery.Builder().build(), query); + + query = new BoolQueryBuilder() + .should( + new QueryStringQueryBuilder("the") + .field(STRING_FIELD_NAME) + .analyzer("stop") + ) + .toQuery(createShardContext()); + expected = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder().build(), BooleanClause.Occur.SHOULD) .build(); assertEquals(expected, query); + + query = new BoolQueryBuilder() + .should( + new QueryStringQueryBuilder("the") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + ) + .toQuery(createShardContext()); + assertEquals(expected, query); } public void testWithPrefixStopWords() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index 6cde10308c6e7..36da37c44c66f 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -613,11 +613,59 @@ public void testWithStopWords() throws Exception { .field(STRING_FIELD_NAME) .analyzer("stop") .toQuery(createShardContext()); - BooleanQuery expected = new BooleanQuery.Builder() + Query expected = new BooleanQuery.Builder() .add(new TermQuery(new Term(STRING_FIELD_NAME, "quick")), BooleanClause.Occur.SHOULD) .add(new TermQuery(new Term(STRING_FIELD_NAME, 
"fox")), BooleanClause.Occur.SHOULD) .build(); assertEquals(expected, query); + + query = new SimpleQueryStringBuilder("the quick fox") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + .toQuery(createShardContext()); + expected = new BooleanQuery.Builder() + .add(new DisjunctionMaxQuery( + Arrays.asList( + new TermQuery(new Term(STRING_FIELD_NAME, "quick")), + new TermQuery(new Term(STRING_FIELD_NAME_2, "quick")) + ), 1.0f), BooleanClause.Occur.SHOULD) + .add(new DisjunctionMaxQuery( + Arrays.asList( + new TermQuery(new Term(STRING_FIELD_NAME, "fox")), + new TermQuery(new Term(STRING_FIELD_NAME_2, "fox")) + ), 1.0f), BooleanClause.Occur.SHOULD) + .build(); + assertEquals(expected, query); + + query = new SimpleQueryStringBuilder("the") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + .toQuery(createShardContext()); + assertEquals(new MatchNoDocsQuery(), query); + + query = new BoolQueryBuilder() + .should( + new SimpleQueryStringBuilder("the") + .field(STRING_FIELD_NAME) + .analyzer("stop") + ) + .toQuery(createShardContext()); + expected = new BooleanQuery.Builder() + .add(new MatchNoDocsQuery(), BooleanClause.Occur.SHOULD) + .build(); + assertEquals(expected, query); + + query = new BoolQueryBuilder() + .should( + new SimpleQueryStringBuilder("the") + .field(STRING_FIELD_NAME) + .field(STRING_FIELD_NAME_2) + .analyzer("stop") + ) + .toQuery(createShardContext()); + assertEquals(expected, query); } public void testWithPrefixStopWords() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java index 9e5383a259adc..dff07e0f215e7 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -33,7 +32,6 @@ import java.util.stream.IntStream; import static java.lang.Math.abs; -import static java.util.Collections.emptyList; import static java.util.stream.Collectors.toList; import static org.apache.lucene.util.TestUtil.randomSimpleString; import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; @@ -45,15 +43,6 @@ public void testBulkByTaskStatus() throws IOException { status.writeTo(out); BulkByScrollTask.Status tripped = new BulkByScrollTask.Status(out.bytes().streamInput()); assertTaskStatusEquals(out.getVersion(), status, tripped); - - // Also check round tripping pre-5.1 which is the first version to support parallelized scroll - out = new BytesStreamOutput(); - out.setVersion(Version.V_5_0_0_rc1); // This can be V_5_0_0 - status.writeTo(out); - StreamInput in = out.bytes().streamInput(); - in.setVersion(Version.V_5_0_0_rc1); - tripped = new BulkByScrollTask.Status(in); - assertTaskStatusEquals(Version.V_5_0_0_rc1, status, tripped); } /** @@ -74,23 +63,19 @@ public static void assertTaskStatusEquals(Version version, BulkByScrollTask.Stat assertEquals(expected.getRequestsPerSecond(), actual.getRequestsPerSecond(), 0f); assertEquals(expected.getReasonCancelled(), actual.getReasonCancelled()); assertEquals(expected.getThrottledUntil(), actual.getThrottledUntil()); - 
if (version.onOrAfter(Version.V_5_1_1)) { - assertThat(actual.getSliceStatuses(), Matchers.hasSize(expected.getSliceStatuses().size())); - for (int i = 0; i < expected.getSliceStatuses().size(); i++) { - BulkByScrollTask.StatusOrException sliceStatus = expected.getSliceStatuses().get(i); - if (sliceStatus == null) { - assertNull(actual.getSliceStatuses().get(i)); - } else if (sliceStatus.getException() == null) { - assertNull(actual.getSliceStatuses().get(i).getException()); - assertTaskStatusEquals(version, sliceStatus.getStatus(), actual.getSliceStatuses().get(i).getStatus()); - } else { - assertNull(actual.getSliceStatuses().get(i).getStatus()); - // Just check the message because we're not testing exception serialization in general here. - assertEquals(sliceStatus.getException().getMessage(), actual.getSliceStatuses().get(i).getException().getMessage()); - } + assertThat(actual.getSliceStatuses(), Matchers.hasSize(expected.getSliceStatuses().size())); + for (int i = 0; i < expected.getSliceStatuses().size(); i++) { + BulkByScrollTask.StatusOrException sliceStatus = expected.getSliceStatuses().get(i); + if (sliceStatus == null) { + assertNull(actual.getSliceStatuses().get(i)); + } else if (sliceStatus.getException() == null) { + assertNull(actual.getSliceStatuses().get(i).getException()); + assertTaskStatusEquals(version, sliceStatus.getStatus(), actual.getSliceStatuses().get(i).getStatus()); + } else { + assertNull(actual.getSliceStatuses().get(i).getStatus()); + // Just check the message because we're not testing exception serialization in general here. + assertEquals(sliceStatus.getException().getMessage(), actual.getSliceStatuses().get(i).getException().getMessage()); } - } else { - assertEquals(emptyList(), actual.getSliceStatuses()); } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index e7109979332b4..b93f170174c3c 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -134,7 +134,7 @@ indexSettings, null, store, newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilari (e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED, () -> primaryTerm, EngineTestCase.tombstoneDocSupplier()); engine = new InternalEngine(config); - engine.recoverFromTranslog(); + engine.recoverFromTranslog(Long.MAX_VALUE); listeners.setCurrentRefreshLocationSupplier(engine::getTranslogLastWriteLocation); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java index 95772910747c4..04d15d39b58e9 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -77,48 +77,4 @@ public void testGetForUpdate() throws IOException { closeShards(primary); } - - public void testGetForUpdateWithParentField() throws IOException { - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put("index.version.created", Version.V_5_6_0) // for parent field mapper - .build(); - IndexMetaData metaData = IndexMetaData.builder("test") - .putMapping("test", "{ \"properties\": { \"foo\": { 
\"type\": \"text\"}}}") - .settings(settings) - .primaryTerm(0, 1).build(); - IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); - recoverShardFromStore(primary); - Engine.IndexResult test = indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); - assertTrue(primary.getEngine().refreshNeeded()); - GetResult testGet = primary.getService().getForUpdate("test", "0", test.getVersion(), VersionType.INTERNAL); - assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); - assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); - try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { - assertEquals(searcher.reader().maxDoc(), 1); // we refreshed - } - - Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null); - assertTrue(primary.getEngine().refreshNeeded()); - GetResult testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); - assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); - assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); - try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { - assertEquals(searcher.reader().maxDoc(), 1); // we read from the translog - } - primary.getEngine().refresh("test"); - try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { - assertEquals(searcher.reader().maxDoc(), 2); - } - - // now again from the reader - test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null); - assertTrue(primary.getEngine().refreshNeeded()); - testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); - assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); - assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); - - closeShards(primary); - } } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 4ec479334ba67..a0e0c481e5f86 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -360,7 +360,8 @@ public void testSimpleOperations() throws IOException { } markCurrentGenAsCommitted(translog); - try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(firstId + 1)) { + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), firstId + 1), randomNonNegativeLong())) { assertThat(snapshot, SnapshotMatchers.size(0)); assertThat(snapshot.totalOperations(), equalTo(0)); } @@ -645,6 +646,82 @@ public void testSnapshotOnClosedTranslog() throws IOException { } } + public void testSnapshotFromMinGen() throws Exception { + Map> operationsByGen = new HashMap<>(); + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), 1), randomNonNegativeLong())) { + assertThat(snapshot, SnapshotMatchers.size(0)); + } + int iters = between(1, 10); + for (int i = 0; i < iters; i++) { + long currentGeneration = translog.currentFileGeneration(); + operationsByGen.putIfAbsent(currentGeneration, new ArrayList<>()); + int numOps = 
+ int numOps = between(0, 20); + for (int op = 0; op < numOps; op++) { + long seqNo = randomLongBetween(0, 1000); + addToTranslogAndList(translog, operationsByGen.get(currentGeneration), new Translog.Index("test", + Long.toString(seqNo), seqNo, primaryTerm.get(), new byte[]{1})); + } + long minGen = randomLongBetween(translog.getMinFileGeneration(), translog.currentFileGeneration()); + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), minGen), Long.MAX_VALUE)) { + List<Translog.Operation> expectedOps = operationsByGen.entrySet().stream() + .filter(e -> e.getKey() >= minGen) + .flatMap(e -> e.getValue().stream()) + .collect(Collectors.toList()); + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedOps)); + } + long upToSeqNo = randomLongBetween(0, 2000); + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(translog.getTranslogUUID(), minGen), upToSeqNo)) { + List<Translog.Operation> expectedOps = operationsByGen.entrySet().stream() + .filter(e -> e.getKey() >= minGen) + .flatMap(e -> e.getValue().stream().filter(op -> op.seqNo() <= upToSeqNo)) + .collect(Collectors.toList()); + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedOps)); + } + translog.rollGeneration(); + } + } + + public void testSeqNoFilterSnapshot() throws Exception { + final int generations = between(2, 20); + for (int gen = 0; gen < generations; gen++) { + List<Long> batch = LongStream.rangeClosed(0, between(0, 100)).boxed().collect(Collectors.toList()); + Randomness.shuffle(batch); + for (long seqNo : batch) { + Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1}); + translog.add(op); + } + translog.rollGeneration(); + } + List<Translog.Operation> operations = new ArrayList<>(); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + Translog.Operation op; + while ((op = snapshot.next()) != null) { + operations.add(op); + } + } + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + Translog.Snapshot filter = new Translog.SeqNoFilterSnapshot(snapshot, between(200, 300), between(300, 400)); // out of range + assertThat(filter, SnapshotMatchers.size(0)); + assertThat(filter.totalOperations(), equalTo(snapshot.totalOperations())); + assertThat(filter.overriddenOperations(), equalTo(snapshot.overriddenOperations())); + assertThat(filter.skippedOperations(), equalTo(snapshot.totalOperations())); + } + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + int fromSeqNo = between(-2, 500); + int toSeqNo = between(fromSeqNo, 500); + List<Translog.Operation> selectedOps = operations.stream() + .filter(op -> fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo).collect(Collectors.toList()); + Translog.Snapshot filter = new Translog.SeqNoFilterSnapshot(snapshot, fromSeqNo, toSeqNo); + assertThat(filter, SnapshotMatchers.containsOperationsInAnyOrder(selectedOps)); + assertThat(filter.totalOperations(), equalTo(snapshot.totalOperations())); + assertThat(filter.overriddenOperations(), equalTo(snapshot.overriddenOperations())); + assertThat(filter.skippedOperations(), equalTo(snapshot.skippedOperations() + operations.size() - selectedOps.size())); + } + } + public void assertFileIsPresent(Translog translog, long id) { if (Files.exists(translog.location().resolve(Translog.getFilename(id)))) { return; @@ -1304,7 +1381,7 @@ public void testBasicRecovery() throws IOException { translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () ->
SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); - try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) { + try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration, Long.MAX_VALUE)) { for (int i = minUncommittedOp; i < translogOperations; i++) { assertEquals("expected operation" + i + " to be in the previous translog but wasn't", translog.currentFileGeneration() - 1, locations.get(i).generation); @@ -1735,7 +1812,7 @@ public void testOpenForeignTranslog() throws IOException { } this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); - try (Translog.Snapshot snapshot = this.translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) { + try (Translog.Snapshot snapshot = this.translog.newSnapshotFromGen(translogGeneration, Long.MAX_VALUE)) { for (int i = firstUncommitted; i < translogOperations; i++) { Translog.Operation next = snapshot.next(); assertNotNull("" + i, next); @@ -2557,7 +2634,8 @@ public void testWithRandomException() throws IOException { generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); } try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); - Translog.Snapshot snapshot = translog.newSnapshotFromGen(minGenForRecovery)) { + Translog.Snapshot snapshot = translog.newSnapshotFromGen( + new Translog.TranslogGeneration(generationUUID, minGenForRecovery), Long.MAX_VALUE)) { assertEquals(syncedDocs.size(), snapshot.totalOperations()); for (int i = 0; i < syncedDocs.size(); i++) { Translog.Operation next = snapshot.next(); diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index 47f30e10ef912..485fd92099630 100644 --- a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -136,7 +136,7 @@ public void testAnalyzerAliasNotAllowedPost5x() throws IOException { .put("index.analysis.analyzer.foobar.type", "standard") .put("index.analysis.analyzer.foobar.alias","foobaz") // analyzer aliases were removed in v5.0.0 alpha6 - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_beta1, null)) + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, null)) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); AnalysisRegistry registry = getNewRegistry(settings); @@ -149,7 +149,7 @@ public void testVersionedAnalyzers() throws Exception { Settings settings2 = Settings.builder() .loadFromStream(yaml, getClass().getResourceAsStream(yaml), false) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0) .build(); AnalysisRegistry newRegistry = getNewRegistry(settings2); IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings2); @@ -162,9 +162,9 @@ public void testVersionedAnalyzers() 
throws Exception { // analysis service has the expected version assertThat(indexAnalyzers.get("standard").analyzer(), is(instanceOf(StandardAnalyzer.class))); - assertEquals(Version.V_5_0_0.luceneVersion, + assertEquals(Version.V_6_0_0.luceneVersion, indexAnalyzers.get("standard").analyzer().getVersion()); - assertEquals(Version.V_5_0_0.luceneVersion, + assertEquals(Version.V_6_0_0.luceneVersion, indexAnalyzers.get("stop").analyzer().getVersion()); assertThat(indexAnalyzers.get("custom7").analyzer(), is(instanceOf(StandardAnalyzer.class))); diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 8edf0f45cfbbf..ce162b9600cf4 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -729,7 +729,7 @@ public void testMultiIndex() throws Exception { public void testFieldDataFieldsParam() throws Exception { assertAcked(client().admin().indices().prepareCreate("test1") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)) + .setSettings(Settings.builder().put("index.version.created", Version.V_6_0_0.id)) .addMapping("_doc", "bar", "type=text,fielddata=true", "baz", "type=text,fielddata=true").get()); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index 338e5b662c5da..4c2352bfebe7d 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -97,12 +97,12 @@ public void testFailStartNode() throws Exception { AcknowledgedResponse response = client().admin().cluster().preparePutPipeline("_id", pipelineSource, XContentType.JSON).get(); assertThat(response.isAcknowledged(), is(true)); - Pipeline pipeline = internalCluster().getInstance(NodeService.class, node1).getIngestService().getPipelineStore().get("_id"); + Pipeline pipeline = internalCluster().getInstance(NodeService.class, node1).getIngestService().getPipeline("_id"); assertThat(pipeline, notNullValue()); installPlugin = false; String node2 = internalCluster().startNode(); - pipeline = internalCluster().getInstance(NodeService.class, node2).getIngestService().getPipelineStore().get("_id"); + pipeline = internalCluster().getInstance(NodeService.class, node2).getIngestService().getPipeline("_id"); assertNotNull(pipeline); assertThat(pipeline.getId(), equalTo("_id")); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index c0353acb7f9d0..83a5bef4de279 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -21,16 +21,69 @@ import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; import java.util.Map; - -import org.elasticsearch.common.settings.Settings; +import java.util.Objects; +import java.util.concurrent.ExecutorService; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchException; +import 
org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.Version; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.DeletePipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.mockito.Mockito; +import org.hamcrest.CustomTypeSafeMatcher; +import org.mockito.ArgumentMatcher; +import org.mockito.invocation.InvocationOnMock; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.argThat; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class IngestServiceTests extends ESTestCase { - private final IngestPlugin DUMMY_PLUGIN = new IngestPlugin() { + + private static final IngestPlugin DUMMY_PLUGIN = new IngestPlugin() { @Override public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) { return Collections.singletonMap("foo", (factories, tag, config) -> null); @@ -38,19 +91,812 @@ public Map<String, Processor.Factory> getProcessors(Processor.Parameters paramet }; public void testIngestPlugin() { - ThreadPool tp = Mockito.mock(ThreadPool.class); - IngestService ingestService = new IngestService(Settings.EMPTY, tp, null, null, + ThreadPool tp = mock(ThreadPool.class); + IngestService ingestService = new IngestService(mock(ClusterService.class), tp, null, null, null, Collections.singletonList(DUMMY_PLUGIN)); - Map<String, Processor.Factory> factories = ingestService.getPipelineStore().getProcessorFactories(); + Map<String, Processor.Factory> factories = ingestService.getProcessorFactories(); assertTrue(factories.containsKey("foo")); assertEquals(1, factories.size()); } public void testIngestPluginDuplicate() { - ThreadPool tp = Mockito.mock(ThreadPool.class); + ThreadPool tp = mock(ThreadPool.class); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - new IngestService(Settings.EMPTY, tp, null, null, + new IngestService(mock(ClusterService.class), tp, null, null, null, Arrays.asList(DUMMY_PLUGIN, DUMMY_PLUGIN))); assertTrue(e.getMessage(), e.getMessage().contains("already registered")); }
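+ // an index request that names a pipeline id that was never stored should surface an IllegalArgumentException through the failure handler rather than throw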
+ + public void testExecuteIndexPipelineDoesNotExist() { + ThreadPool threadPool = mock(ThreadPool.class); + final ExecutorService executorService = EsExecutors.newDirectExecutorService(); + when(threadPool.executor(anyString())).thenReturn(executorService); + IngestService ingestService = new IngestService(mock(ClusterService.class), threadPool, null, null, + null, Collections.singletonList(DUMMY_PLUGIN)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + + final SetOnce<Boolean> failure = new SetOnce<>(); + final BiConsumer<IndexRequest, Exception> failureHandler = (request, e) -> { + failure.set(true); + assertThat(request, sameInstance(indexRequest)); + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), equalTo("pipeline with id [_id] does not exist")); + }; + + @SuppressWarnings("unchecked") + final Consumer<Exception> completionHandler = mock(Consumer.class); + + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + + assertTrue(failure.get()); + verify(completionHandler, times(1)).accept(null); + } + + public void testUpdatePipelines() { + IngestService ingestService = createWithProcessors(); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.pipelines().size(), is(0)); + + PipelineConfiguration pipeline = new PipelineConfiguration( + "_id",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON + ); + IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", pipeline)); + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) + .build(); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.pipelines().size(), is(1)); + assertThat(ingestService.pipelines().get("_id").getId(), equalTo("_id")); + assertThat(ingestService.pipelines().get("_id").getDescription(), nullValue()); + assertThat(ingestService.pipelines().get("_id").getProcessors().size(), equalTo(1)); + assertThat(ingestService.pipelines().get("_id").getProcessors().get(0).getType(), equalTo("set")); + } + + public void testDelete() { + IngestService ingestService = createWithProcessors(); + PipelineConfiguration config = new PipelineConfiguration( + "_id",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON + ); + IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", config)); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() + .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("_id"), notNullValue()); + + // Delete pipeline: + DeletePipelineRequest deleteRequest =
new DeletePipelineRequest("_id"); + previousClusterState = clusterState; + clusterState = IngestService.innerDelete(deleteRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("_id"), nullValue()); + + // Delete existing pipeline: + try { + IngestService.innerDelete(deleteRequest, clusterState); + fail("exception expected"); + } catch (ResourceNotFoundException e) { + assertThat(e.getMessage(), equalTo("pipeline [_id] is missing")); + } + } + + public void testValidateNoIngestInfo() throws Exception { + IngestService ingestService = createWithProcessors(); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray( + "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON); + Exception e = expectThrows(IllegalStateException.class, () -> ingestService.validatePipeline(emptyMap(), putRequest)); + assertEquals("Ingest info is empty", e.getMessage()); + + DiscoveryNode discoveryNode = new DiscoveryNode("_node_id", buildNewFakeTransportAddress(), + emptyMap(), emptySet(), Version.CURRENT); + IngestInfo ingestInfo = new IngestInfo(Collections.singletonList(new ProcessorInfo("set"))); + ingestService.validatePipeline(Collections.singletonMap(discoveryNode, ingestInfo), putRequest); + } + + public void testCrud() throws Exception { + IngestService ingestService = createWithProcessors(); + String id = "_id"; + Pipeline pipeline = ingestService.getPipeline(id); + assertThat(pipeline, nullValue()); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + + PutPipelineRequest putRequest = new PutPipelineRequest(id, + new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + pipeline = ingestService.getPipeline(id); + assertThat(pipeline, notNullValue()); + assertThat(pipeline.getId(), equalTo(id)); + assertThat(pipeline.getDescription(), nullValue()); + assertThat(pipeline.getProcessors().size(), equalTo(1)); + assertThat(pipeline.getProcessors().get(0).getType(), equalTo("set")); + + DeletePipelineRequest deleteRequest = new DeletePipelineRequest(id); + previousClusterState = clusterState; + clusterState = IngestService.innerDelete(deleteRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + pipeline = ingestService.getPipeline(id); + assertThat(pipeline, nullValue()); + } + + public void testPut() { + IngestService ingestService = createWithProcessors(); + String id = "_id"; + Pipeline pipeline = ingestService.getPipeline(id); + assertThat(pipeline, nullValue()); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + + // add a new pipeline: + PutPipelineRequest putRequest = new PutPipelineRequest(id, new BytesArray("{\"processors\": []}"), XContentType.JSON); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + pipeline = ingestService.getPipeline(id); + assertThat(pipeline, notNullValue()); + assertThat(pipeline.getId(), equalTo(id)); + 
assertThat(pipeline.getDescription(), nullValue()); + assertThat(pipeline.getProcessors().size(), equalTo(0)); + + // overwrite existing pipeline: + putRequest = + new PutPipelineRequest(id, new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), XContentType.JSON); + previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + pipeline = ingestService.getPipeline(id); + assertThat(pipeline, notNullValue()); + assertThat(pipeline.getId(), equalTo(id)); + assertThat(pipeline.getDescription(), equalTo("_description")); + assertThat(pipeline.getProcessors().size(), equalTo(0)); + } + + public void testPutWithErrorResponse() { + IngestService ingestService = createWithProcessors(); + String id = "_id"; + Pipeline pipeline = ingestService.getPipeline(id); + assertThat(pipeline, nullValue()); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + + PutPipelineRequest putRequest = + new PutPipelineRequest(id, new BytesArray("{\"description\": \"empty processors\"}"), XContentType.JSON); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + try { + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + fail("should fail"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), equalTo("[processors] required property is missing")); + } + pipeline = ingestService.getPipeline(id); + assertNotNull(pipeline); + assertThat(pipeline.getId(), equalTo("_id")); + assertThat(pipeline.getDescription(), equalTo("this is a place holder pipeline, because pipeline with" + + " id [_id] could not be loaded")); + assertThat(pipeline.getProcessors().size(), equalTo(1)); + assertNull(pipeline.getProcessors().get(0).getTag()); + assertThat(pipeline.getProcessors().get(0).getType(), equalTo("unknown")); + } + + public void testDeleteUsingWildcard() { + IngestService ingestService = createWithProcessors(); + HashMap<String, PipelineConfiguration> pipelines = new HashMap<>(); + BytesArray definition = new BytesArray( + "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}" + ); + pipelines.put("p1", new PipelineConfiguration("p1", definition, XContentType.JSON)); + pipelines.put("p2", new PipelineConfiguration("p2", definition, XContentType.JSON)); + pipelines.put("q1", new PipelineConfiguration("q1", definition, XContentType.JSON)); + IngestMetadata ingestMetadata = new IngestMetadata(pipelines); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() + .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("p1"), notNullValue()); + assertThat(ingestService.getPipeline("p2"), notNullValue()); + assertThat(ingestService.getPipeline("q1"), notNullValue()); + + // Delete pipeline matching wildcard + DeletePipelineRequest deleteRequest = new DeletePipelineRequest("p*"); + previousClusterState = clusterState; + clusterState = IngestService.innerDelete(deleteRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); +
assertThat(ingestService.getPipeline("p1"), nullValue()); + assertThat(ingestService.getPipeline("p2"), nullValue()); + assertThat(ingestService.getPipeline("q1"), notNullValue()); + + // Exception if we used name which does not exist + try { + IngestService.innerDelete(new DeletePipelineRequest("unknown"), clusterState); + fail("exception expected"); + } catch (ResourceNotFoundException e) { + assertThat(e.getMessage(), equalTo("pipeline [unknown] is missing")); + } + + // match all wildcard works on last remaining pipeline + DeletePipelineRequest matchAllDeleteRequest = new DeletePipelineRequest("*"); + previousClusterState = clusterState; + clusterState = IngestService.innerDelete(matchAllDeleteRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("p1"), nullValue()); + assertThat(ingestService.getPipeline("p2"), nullValue()); + assertThat(ingestService.getPipeline("q1"), nullValue()); + + // match all wildcard does not throw exception if none match + IngestService.innerDelete(matchAllDeleteRequest, clusterState); + } + + public void testDeleteWithExistingUnmatchedPipelines() { + IngestService ingestService = createWithProcessors(); + HashMap pipelines = new HashMap<>(); + BytesArray definition = new BytesArray( + "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}" + ); + pipelines.put("p1", new PipelineConfiguration("p1", definition, XContentType.JSON)); + IngestMetadata ingestMetadata = new IngestMetadata(pipelines); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() + .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + assertThat(ingestService.getPipeline("p1"), notNullValue()); + + DeletePipelineRequest deleteRequest = new DeletePipelineRequest("z*"); + try { + IngestService.innerDelete(deleteRequest, clusterState); + fail("exception expected"); + } catch (ResourceNotFoundException e) { + assertThat(e.getMessage(), equalTo("pipeline [z*] is missing")); + } + } + + public void testGetPipelines() { + Map configs = new HashMap<>(); + configs.put("_id1", new PipelineConfiguration( + "_id1", new BytesArray("{\"processors\": []}"), XContentType.JSON + )); + configs.put("_id2", new PipelineConfiguration( + "_id2", new BytesArray("{\"processors\": []}"), XContentType.JSON + )); + + assertThat(IngestService.innerGetPipelines(null, "_id1").isEmpty(), is(true)); + + IngestMetadata ingestMetadata = new IngestMetadata(configs); + List pipelines = IngestService.innerGetPipelines(ingestMetadata, "_id1"); + assertThat(pipelines.size(), equalTo(1)); + assertThat(pipelines.get(0).getId(), equalTo("_id1")); + + pipelines = IngestService.innerGetPipelines(ingestMetadata, "_id1", "_id2"); + assertThat(pipelines.size(), equalTo(2)); + assertThat(pipelines.get(0).getId(), equalTo("_id1")); + assertThat(pipelines.get(1).getId(), equalTo("_id2")); + + pipelines = IngestService.innerGetPipelines(ingestMetadata, "_id*"); + pipelines.sort(Comparator.comparing(PipelineConfiguration::getId)); + assertThat(pipelines.size(), equalTo(2)); + assertThat(pipelines.get(0).getId(), equalTo("_id1")); + assertThat(pipelines.get(1).getId(), equalTo("_id2")); + + // get all variants: (no IDs or '*') + pipelines = 
+ + public void testValidate() throws Exception { + IngestService ingestService = createWithProcessors(); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray( + "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\", \"tag\": \"tag1\"}}," + + "{\"remove\" : {\"field\": \"_field\", \"tag\": \"tag2\"}}]}"), + XContentType.JSON); + + DiscoveryNode node1 = new DiscoveryNode("_node_id1", buildNewFakeTransportAddress(), + emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode node2 = new DiscoveryNode("_node_id2", buildNewFakeTransportAddress(), + emptyMap(), emptySet(), Version.CURRENT); + Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>(); + ingestInfos.put(node1, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); + ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set")))); + + ElasticsearchParseException e = + expectThrows(ElasticsearchParseException.class, () -> ingestService.validatePipeline(ingestInfos, putRequest)); + assertEquals("Processor type [remove] is not installed on node [" + node2 + "]", e.getMessage()); + assertEquals("remove", e.getMetadata("es.processor_type").get(0)); + assertEquals("tag2", e.getMetadata("es.processor_tag").get(0)); + + ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); + ingestService.validatePipeline(ingestInfos, putRequest); + } + + public void testExecuteIndexPipelineExistsButFailedParsing() { + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> new AbstractProcessor("mock") { + @Override + public void execute(IngestDocument ingestDocument) { + throw new IllegalStateException("error"); + } + + @Override + public String getType() { + return null; + } + } + )); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + String id = "_id"; + PutPipelineRequest putRequest = new PutPipelineRequest(id, + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final SetOnce<Boolean> failure = new SetOnce<>(); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline(id); + final BiConsumer<IndexRequest, Exception> failureHandler = (request, e) -> { + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(e.getCause().getCause(), instanceOf(IllegalStateException.class)); + assertThat(e.getCause().getCause().getMessage(), equalTo("error")); + failure.set(true); + }; + + @SuppressWarnings("unchecked") + final Consumer<Exception> completionHandler = mock(Consumer.class); + + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + + assertTrue(failure.get()); + verify(completionHandler, times(1)).accept(null); + }
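+ // in a bulk request only the documents that reference the missing pipeline id should fail; the others proceed normally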
+ public void testExecuteBulkPipelineDoesNotExist() { + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> mock(CompoundProcessor.class))); + + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + + BulkRequest bulkRequest = new BulkRequest(); + + IndexRequest indexRequest1 = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + bulkRequest.add(indexRequest1); + IndexRequest indexRequest2 = + new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("does_not_exist"); + bulkRequest.add(indexRequest2); + @SuppressWarnings("unchecked") + BiConsumer<IndexRequest, Exception> failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + Consumer<Exception> completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(bulkRequest.requests(), failureHandler, completionHandler); + verify(failureHandler, times(1)).accept( + argThat(new CustomTypeSafeMatcher<IndexRequest>("failure handler was not called with the expected arguments") { + @Override + protected boolean matchesSafely(IndexRequest item) { + return item == indexRequest2; + } + + }), + argThat(new CustomTypeSafeMatcher<IllegalArgumentException>("failure handler was not called with the expected arguments") { + @Override + protected boolean matchesSafely(IllegalArgumentException iae) { + return "pipeline with id [does_not_exist] does not exist".equals(iae.getMessage()); + } + }) + ); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecuteSuccess() { + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> mock(CompoundProcessor.class))); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + @SuppressWarnings("unchecked") + final BiConsumer<IndexRequest, Exception> failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer<Exception> completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(failureHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); + }
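+ // a pipeline with an empty processor list is valid and must complete without invoking the failure handler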
+ + public void testExecuteEmptyPipeline() throws Exception { + IngestService ingestService = createWithProcessors(emptyMap()); + PutPipelineRequest putRequest = + new PutPipelineRequest("_id", new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + @SuppressWarnings("unchecked") + final BiConsumer<IndexRequest, Exception> failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer<Exception> completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(failureHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecutePropagateAllMetaDataUpdates() throws Exception { + final CompoundProcessor processor = mock(CompoundProcessor.class); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> processor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final long newVersion = randomLong(); + final String versionType = randomFrom("internal", "external", "external_gt", "external_gte"); + doAnswer((InvocationOnMock invocationOnMock) -> { + IngestDocument ingestDocument = (IngestDocument) invocationOnMock.getArguments()[0]; + for (IngestDocument.MetaData metaData : IngestDocument.MetaData.values()) { + if (metaData == IngestDocument.MetaData.VERSION) { + ingestDocument.setFieldValue(metaData.getFieldName(), newVersion); + } else if (metaData == IngestDocument.MetaData.VERSION_TYPE) { + ingestDocument.setFieldValue(metaData.getFieldName(), versionType); + } else { + ingestDocument.setFieldValue(metaData.getFieldName(), "update" + metaData.getFieldName()); + } + } + return null; + }).when(processor).execute(any()); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + @SuppressWarnings("unchecked") + final BiConsumer<IndexRequest, Exception> failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer<Exception> completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(processor).execute(any()); + verify(failureHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); + assertThat(indexRequest.index(), equalTo("update_index")); + assertThat(indexRequest.type(), equalTo("update_type")); + assertThat(indexRequest.id(), equalTo("update_id")); + assertThat(indexRequest.routing(), equalTo("update_routing")); + assertThat(indexRequest.version(), equalTo(newVersion)); + assertThat(indexRequest.versionType(), equalTo(VersionType.fromString(versionType))); + }
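+ // a processor exception must reach the per-request failure handler while the completion handler still runs exactly once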
BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + doThrow(new RuntimeException()) + .when(processor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecuteSuccessWithOnFailure() throws Exception { + final Processor processor = mock(Processor.class); + when(processor.getType()).thenReturn("mock_processor_type"); + when(processor.getTag()).thenReturn("mock_processor_tag"); + final Processor onFailureProcessor = mock(Processor.class); + final CompoundProcessor compoundProcessor = new CompoundProcessor( + false, Collections.singletonList(processor), Collections.singletonList(new CompoundProcessor(onFailureProcessor))); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> compoundProcessor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + doThrow(new RuntimeException()).when(processor).execute(eqIndexTypeId(emptyMap())); + @SuppressWarnings("unchecked") + final BiConsumer failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(failureHandler, never()).accept(eq(indexRequest), any(ElasticsearchException.class)); + verify(completionHandler, times(1)).accept(null); + } + + public void testExecuteFailureWithNestedOnFailure() throws Exception { + final Processor processor = mock(Processor.class); + final Processor onFailureProcessor = mock(Processor.class); + final Processor onFailureOnFailureProcessor = mock(Processor.class); + final List processors = Collections.singletonList(onFailureProcessor); + final List onFailureProcessors = Collections.singletonList(onFailureOnFailureProcessor); + final CompoundProcessor compoundProcessor = new CompoundProcessor( + false, + Collections.singletonList(processor), + Collections.singletonList(new CompoundProcessor(false, 
+ + public void testExecuteFailureWithNestedOnFailure() throws Exception { + final Processor processor = mock(Processor.class); + final Processor onFailureProcessor = mock(Processor.class); + final Processor onFailureOnFailureProcessor = mock(Processor.class); + final List<Processor> processors = Collections.singletonList(onFailureProcessor); + final List<Processor> onFailureProcessors = Collections.singletonList(onFailureOnFailureProcessor); + final CompoundProcessor compoundProcessor = new CompoundProcessor( + false, + Collections.singletonList(processor), + Collections.singletonList(new CompoundProcessor(false, processors, onFailureProcessors))); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> compoundProcessor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(emptyMap()).setPipeline("_id"); + doThrow(new RuntimeException()) + .when(onFailureOnFailureProcessor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + doThrow(new RuntimeException()) + .when(onFailureProcessor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + doThrow(new RuntimeException()) + .when(processor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + @SuppressWarnings("unchecked") + final BiConsumer<IndexRequest, Exception> failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + final Consumer<Exception> completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), emptyMap())); + verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); + verify(completionHandler, times(1)).accept(null); + }
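+ // delete and update requests carry no pipeline, so only the index requests should be routed through the (failing) processor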
+ + public void testBulkRequestExecutionWithFailures() throws Exception { + BulkRequest bulkRequest = new BulkRequest(); + String pipelineId = "_id"; + + int numRequest = scaledRandomIntBetween(8, 64); + int numIndexRequests = 0; + for (int i = 0; i < numRequest; i++) { + DocWriteRequest request; + if (randomBoolean()) { + if (randomBoolean()) { + request = new DeleteRequest("_index", "_type", "_id"); + } else { + request = new UpdateRequest("_index", "_type", "_id"); + } + } else { + IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId); + indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); + request = indexRequest; + numIndexRequests++; + } + bulkRequest.add(request); + } + + CompoundProcessor processor = mock(CompoundProcessor.class); + when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); + Exception error = new RuntimeException(); + doThrow(error).when(processor).execute(any()); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> processor)); + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + + @SuppressWarnings("unchecked") + BiConsumer<IndexRequest, Exception> requestItemErrorHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + Consumer<Exception> completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler); + + verify(requestItemErrorHandler, times(numIndexRequests)).accept(any(IndexRequest.class), argThat(new ArgumentMatcher<Exception>() { + @Override + public boolean matches(final Object o) { + return ((Exception)o).getCause().getCause().equals(error); + } + })); + verify(completionHandler, times(1)).accept(null); + } + + public void testBulkRequestExecution() { + BulkRequest bulkRequest = new BulkRequest(); + String pipelineId = "_id"; + + int numRequest = scaledRandomIntBetween(8, 64); + for (int i = 0; i < numRequest; i++) { + IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId); + indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); + bulkRequest.add(indexRequest); + } + + IngestService ingestService = createWithProcessors(emptyMap()); + PutPipelineRequest putRequest = + new PutPipelineRequest("_id", new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + + @SuppressWarnings("unchecked") + BiConsumer<IndexRequest, Exception> requestItemErrorHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") + Consumer<Exception> completionHandler = mock(Consumer.class); + ingestService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler); + + verify(requestItemErrorHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); + } + + public void testStats() { + final Processor processor = mock(Processor.class); + IngestService ingestService = createWithProcessors(Collections.singletonMap( + "mock", (factories, tag, config) -> processor)); + final IngestStats initialStats = ingestService.stats(); + assertThat(initialStats.getStatsPerPipeline().size(), equalTo(0)); + assertThat(initialStats.getTotalStats().getIngestCount(), equalTo(0L)); + assertThat(initialStats.getTotalStats().getIngestCurrent(), equalTo(0L)); + assertThat(initialStats.getTotalStats().getIngestFailedCount(), equalTo(0L)); + assertThat(initialStats.getTotalStats().getIngestTimeInMillis(), equalTo(0L)); + + PutPipelineRequest putRequest = new PutPipelineRequest("_id1", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + putRequest = new PutPipelineRequest("_id2", + new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + final Map<String, PipelineConfiguration> configurationMap = new HashMap<>(); + configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); + configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); + ingestService.updatePipelineStats(new IngestMetadata(configurationMap));
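+ // two pipelines are registered up front; each request below should bump exactly one per-pipeline counter plus the totals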
+ + @SuppressWarnings("unchecked") final BiConsumer<IndexRequest, Exception> failureHandler = mock(BiConsumer.class); + @SuppressWarnings("unchecked") final Consumer<Exception> completionHandler = mock(Consumer.class); + + final IndexRequest indexRequest = new IndexRequest("_index"); + indexRequest.setPipeline("_id1"); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + final IngestStats afterFirstRequestStats = ingestService.stats(); + assertThat(afterFirstRequestStats.getStatsPerPipeline().size(), equalTo(2)); + assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); + assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(0L)); + assertThat(afterFirstRequestStats.getTotalStats().getIngestCount(), equalTo(1L)); + + indexRequest.setPipeline("_id2"); + ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + final IngestStats afterSecondRequestStats = ingestService.stats(); + assertThat(afterSecondRequestStats.getStatsPerPipeline().size(), equalTo(2)); + assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); + assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(1L)); + assertThat(afterSecondRequestStats.getTotalStats().getIngestCount(), equalTo(2L)); + } + + // issue: https://github.com/elastic/elasticsearch/issues/18126 + public void testUpdatingStatsWhenRemovingPipelineWorks() { + IngestService ingestService = createWithProcessors(); + Map<String, PipelineConfiguration> configurationMap = new HashMap<>(); + configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); + configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); + ingestService.updatePipelineStats(new IngestMetadata(configurationMap)); + assertThat(ingestService.stats().getStatsPerPipeline(), hasKey("_id1")); + assertThat(ingestService.stats().getStatsPerPipeline(), hasKey("_id2")); + + configurationMap = new HashMap<>(); + configurationMap.put("_id3", new PipelineConfiguration("_id3", new BytesArray("{}"), XContentType.JSON)); + ingestService.updatePipelineStats(new IngestMetadata(configurationMap)); + assertThat(ingestService.stats().getStatsPerPipeline(), not(hasKey("_id1"))); + assertThat(ingestService.stats().getStatsPerPipeline(), not(hasKey("_id2"))); + } + + private IngestDocument eqIndexTypeId(final Map<String, Object> source) { + return argThat(new IngestDocumentMatcher("_index", "_type", "_id", source)); + } + + private IngestDocument eqIndexTypeId(final Long version, final VersionType versionType, final Map<String, Object> source) { + return argThat(new IngestDocumentMatcher("_index", "_type", "_id", version, versionType, source)); + } + + private static IngestService createWithProcessors() { + Map<String, Processor.Factory> processors = new HashMap<>(); + processors.put("set", (factories, tag, config) -> { + String field = (String) config.remove("field"); + String value = (String) config.remove("value"); + return new Processor() { + @Override + public void execute(IngestDocument ingestDocument) { + ingestDocument.setFieldValue(field, value); + } + + @Override + public String getType() { + return "set"; + } + + @Override + public String getTag() { + return tag; + } + }; + }); + processors.put("remove", (factories, tag, config) -> { + String field = (String) config.remove("field"); + return new Processor() { + @Override + public void execute(IngestDocument ingestDocument) { + ingestDocument.removeField(field); + } + + @Override + public String getType() { + return "remove"; + } + + @Override + public String getTag() { + return tag; + } + }; + }); + return createWithProcessors(processors); + }
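+ // the parameterized overload wires a mock ThreadPool to a direct executor so bulk execution runs synchronously on the test thread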
public void execute(IngestDocument ingestDocument) { + ingestDocument.removeField(field); + } + + @Override + public String getType() { + return "remove"; + } + + @Override + public String getTag() { + return tag; + } + }; + }); + return createWithProcessors(processors); + } + + private static IngestService createWithProcessors(Map processors) { + ThreadPool threadPool = mock(ThreadPool.class); + final ExecutorService executorService = EsExecutors.newDirectExecutorService(); + when(threadPool.executor(anyString())).thenReturn(executorService); + return new IngestService(mock(ClusterService.class), threadPool, null, null, + null, Collections.singletonList(new IngestPlugin() { + @Override + public Map getProcessors(final Processor.Parameters parameters) { + return processors; + } + })); + } + + private class IngestDocumentMatcher extends ArgumentMatcher { + + private final IngestDocument ingestDocument; + + IngestDocumentMatcher(String index, String type, String id, Map source) { + this.ingestDocument = new IngestDocument(index, type, id, null, null, null, source); + } + + IngestDocumentMatcher(String index, String type, String id, Long version, VersionType versionType, Map source) { + this.ingestDocument = new IngestDocument(index, type, id, null, version, versionType, source); + } + + @Override + public boolean matches(Object o) { + if (o.getClass() == IngestDocument.class) { + IngestDocument otherIngestDocument = (IngestDocument) o; + //ingest metadata will not be the same (timestamp differs every time) + return Objects.equals(ingestDocument.getSourceAndMetadata(), otherIngestDocument.getSourceAndMetadata()); + } + return false; + } + } } diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java deleted file mode 100644 index 15a23421da26a..0000000000000 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ /dev/null @@ -1,471 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.ingest; - -import org.apache.lucene.util.SetOnce; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.VersionType; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.hamcrest.CustomTypeSafeMatcher; -import org.junit.Before; -import org.mockito.ArgumentMatcher; -import org.mockito.invocation.InvocationOnMock; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ExecutorService; -import java.util.function.BiConsumer; -import java.util.function.Consumer; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.sameInstance; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.argThat; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class PipelineExecutionServiceTests extends ESTestCase { - - private final Integer version = randomBoolean() ? 
randomInt() : null; - private PipelineStore store; - private PipelineExecutionService executionService; - - @Before - public void setup() { - store = mock(PipelineStore.class); - ThreadPool threadPool = mock(ThreadPool.class); - final ExecutorService executorService = EsExecutors.newDirectExecutorService(); - when(threadPool.executor(anyString())).thenReturn(executorService); - executionService = new PipelineExecutionService(store, threadPool); - } - - public void testExecuteIndexPipelineDoesNotExist() { - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - - final SetOnce failure = new SetOnce<>(); - final BiConsumer failureHandler = (request, e) -> { - failure.set(true); - assertThat(request, sameInstance(indexRequest)); - assertThat(e, instanceOf(IllegalArgumentException.class)); - assertThat(e.getMessage(), equalTo("pipeline with id [_id] does not exist")); - }; - - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - - assertTrue(failure.get()); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteIndexPipelineExistsButFailedParsing() { - when(store.get("_id")).thenReturn(new Pipeline("_id", "stub", null, - new CompoundProcessor(new AbstractProcessor("mock") { - @Override - public void execute(IngestDocument ingestDocument) { - throw new IllegalStateException("error"); - } - - @Override - public String getType() { - return null; - } - }))); - - final SetOnce failure = new SetOnce<>(); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - final BiConsumer failureHandler = (request, e) -> { - assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); - assertThat(e.getCause().getCause(), instanceOf(IllegalStateException.class)); - assertThat(e.getCause().getCause().getMessage(), equalTo("error")); - failure.set(true); - }; - - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - - assertTrue(failure.get()); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteBulkPipelineDoesNotExist() { - CompoundProcessor processor = mock(CompoundProcessor.class); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - BulkRequest bulkRequest = new BulkRequest(); - - IndexRequest indexRequest1 = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - bulkRequest.add(indexRequest1); - IndexRequest indexRequest2 = - new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("does_not_exist"); - bulkRequest.add(indexRequest2); - @SuppressWarnings("unchecked") - BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(bulkRequest.requests(), failureHandler, completionHandler); - verify(failureHandler, times(1)).accept( - argThat(new CustomTypeSafeMatcher("failure handler was not called with the expected arguments") { - @Override - protected boolean matchesSafely(IndexRequest item) { - return item == indexRequest2; - } - - }), - argThat(new 
CustomTypeSafeMatcher("failure handler was not called with the expected arguments") { - @Override - protected boolean matchesSafely(IllegalArgumentException iae) { - return "pipeline with id [does_not_exist] does not exist".equals(iae.getMessage()); - } - }) - ); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteSuccess() { - final CompoundProcessor processor = mock(CompoundProcessor.class); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(failureHandler, never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteEmptyPipeline() throws Exception { - final CompoundProcessor processor = mock(CompoundProcessor.class); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - when(processor.getProcessors()).thenReturn(Collections.emptyList()); - - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor, never()).execute(any()); - verify(failureHandler, never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecutePropagateAllMetaDataUpdates() throws Exception { - final CompoundProcessor processor = mock(CompoundProcessor.class); - when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); - final long newVersion = randomLong(); - final String versionType = randomFrom("internal", "external", "external_gt", "external_gte"); - doAnswer((InvocationOnMock invocationOnMock) -> { - IngestDocument ingestDocument = (IngestDocument) invocationOnMock.getArguments()[0]; - for (IngestDocument.MetaData metaData : IngestDocument.MetaData.values()) { - if (metaData == IngestDocument.MetaData.VERSION) { - ingestDocument.setFieldValue(metaData.getFieldName(), newVersion); - } else if (metaData == IngestDocument.MetaData.VERSION_TYPE) { - ingestDocument.setFieldValue(metaData.getFieldName(), versionType); - } else { - ingestDocument.setFieldValue(metaData.getFieldName(), "update" + metaData.getFieldName()); - } - } - return null; - }).when(processor).execute(any()); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor).execute(any()); - verify(failureHandler, 
never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(null); - assertThat(indexRequest.index(), equalTo("update_index")); - assertThat(indexRequest.type(), equalTo("update_type")); - assertThat(indexRequest.id(), equalTo("update_id")); - assertThat(indexRequest.routing(), equalTo("update_routing")); - assertThat(indexRequest.version(), equalTo(newVersion)); - assertThat(indexRequest.versionType(), equalTo(VersionType.fromString(versionType))); - } - - public void testExecuteFailure() throws Exception { - final CompoundProcessor processor = mock(CompoundProcessor.class); - when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()) - .when(processor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteSuccessWithOnFailure() throws Exception { - final Processor processor = mock(Processor.class); - when(processor.getType()).thenReturn("mock_processor_type"); - when(processor.getTag()).thenReturn("mock_processor_tag"); - final Processor onFailureProcessor = mock(Processor.class); - final CompoundProcessor compoundProcessor = new CompoundProcessor( - false, Collections.singletonList(processor), Collections.singletonList(new CompoundProcessor(onFailureProcessor))); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()).when(processor).execute(eqIndexTypeId(Collections.emptyMap())); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(failureHandler, never()).accept(eq(indexRequest), any(ElasticsearchException.class)); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteFailureWithOnFailure() throws Exception { - final Processor processor = mock(Processor.class); - final Processor onFailureProcessor = mock(Processor.class); - final CompoundProcessor compoundProcessor = new CompoundProcessor( - false, Collections.singletonList(processor), Collections.singletonList(new CompoundProcessor(onFailureProcessor))); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()) 
- .when(processor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - doThrow(new RuntimeException()) - .when(onFailureProcessor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); - verify(completionHandler, times(1)).accept(null); - } - - public void testExecuteFailureWithNestedOnFailure() throws Exception { - final Processor processor = mock(Processor.class); - final Processor onFailureProcessor = mock(Processor.class); - final Processor onFailureOnFailureProcessor = mock(Processor.class); - final List processors = Collections.singletonList(onFailureProcessor); - final List onFailureProcessors = Collections.singletonList(onFailureOnFailureProcessor); - final CompoundProcessor compoundProcessor = new CompoundProcessor( - false, - Collections.singletonList(processor), - Collections.singletonList(new CompoundProcessor(false, processors, onFailureProcessors))); - when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor)); - final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()) - .when(onFailureOnFailureProcessor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - doThrow(new RuntimeException()) - .when(onFailureProcessor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - doThrow(new RuntimeException()) - .when(processor) - .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); - verify(completionHandler, times(1)).accept(null); - } - - public void testBulkRequestExecutionWithFailures() throws Exception { - BulkRequest bulkRequest = new BulkRequest(); - String pipelineId = "_id"; - - int numRequest = scaledRandomIntBetween(8, 64); - int numIndexRequests = 0; - for (int i = 0; i < numRequest; i++) { - DocWriteRequest request; - if (randomBoolean()) { - if (randomBoolean()) { - request = new DeleteRequest("_index", "_type", "_id"); - } else { - request = new UpdateRequest("_index", "_type", "_id"); - } - } else { - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId); - indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); - request = indexRequest; - numIndexRequests++; - } - bulkRequest.add(request); - } - - CompoundProcessor 
processor = mock(CompoundProcessor.class); - when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); - Exception error = new RuntimeException(); - doThrow(error).when(processor).execute(any()); - when(store.get(pipelineId)).thenReturn(new Pipeline(pipelineId, null, version, processor)); - - @SuppressWarnings("unchecked") - BiConsumer requestItemErrorHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler); - - verify(requestItemErrorHandler, times(numIndexRequests)).accept(any(IndexRequest.class), eq(error)); - verify(completionHandler, times(1)).accept(null); - } - - public void testBulkRequestExecution() { - BulkRequest bulkRequest = new BulkRequest(); - String pipelineId = "_id"; - - int numRequest = scaledRandomIntBetween(8, 64); - for (int i = 0; i < numRequest; i++) { - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId); - indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); - bulkRequest.add(indexRequest); - } - - when(store.get(pipelineId)).thenReturn(new Pipeline(pipelineId, null, version, new CompoundProcessor())); - - @SuppressWarnings("unchecked") - BiConsumer requestItemErrorHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler); - - verify(requestItemErrorHandler, never()).accept(any(), any()); - verify(completionHandler, times(1)).accept(null); - } - - public void testStats() { - final IngestStats initialStats = executionService.stats(); - assertThat(initialStats.getStatsPerPipeline().size(), equalTo(0)); - assertThat(initialStats.getTotalStats().getIngestCount(), equalTo(0L)); - assertThat(initialStats.getTotalStats().getIngestCurrent(), equalTo(0L)); - assertThat(initialStats.getTotalStats().getIngestFailedCount(), equalTo(0L)); - assertThat(initialStats.getTotalStats().getIngestTimeInMillis(), equalTo(0L)); - - when(store.get("_id1")).thenReturn(new Pipeline("_id1", null, version, new CompoundProcessor(mock(Processor.class)))); - when(store.get("_id2")).thenReturn(new Pipeline("_id2", null, null, new CompoundProcessor(mock(Processor.class)))); - - final Map configurationMap = new HashMap<>(); - configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); - configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); - executionService.updatePipelineStats(new IngestMetadata(configurationMap)); - - @SuppressWarnings("unchecked") - final BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final Consumer completionHandler = mock(Consumer.class); - - final IndexRequest indexRequest = new IndexRequest("_index"); - indexRequest.setPipeline("_id1"); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - final IngestStats afterFirstRequestStats = executionService.stats(); - assertThat(afterFirstRequestStats.getStatsPerPipeline().size(), equalTo(2)); - assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); - assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(0L)); - 
assertThat(afterFirstRequestStats.getTotalStats().getIngestCount(), equalTo(1L)); - - indexRequest.setPipeline("_id2"); - executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); - final IngestStats afterSecondRequestStats = executionService.stats(); - assertThat(afterSecondRequestStats.getStatsPerPipeline().size(), equalTo(2)); - assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); - assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(1L)); - assertThat(afterSecondRequestStats.getTotalStats().getIngestCount(), equalTo(2L)); - } - - // issue: https://github.com/elastic/elasticsearch/issues/18126 - public void testUpdatingStatsWhenRemovingPipelineWorks() { - Map configurationMap = new HashMap<>(); - configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); - configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); - executionService.updatePipelineStats(new IngestMetadata(configurationMap)); - assertThat(executionService.stats().getStatsPerPipeline(), hasKey("_id1")); - assertThat(executionService.stats().getStatsPerPipeline(), hasKey("_id2")); - - configurationMap = new HashMap<>(); - configurationMap.put("_id3", new PipelineConfiguration("_id3", new BytesArray("{}"), XContentType.JSON)); - executionService.updatePipelineStats(new IngestMetadata(configurationMap)); - assertThat(executionService.stats().getStatsPerPipeline(), not(hasKey("_id1"))); - assertThat(executionService.stats().getStatsPerPipeline(), not(hasKey("_id2"))); - } - - private IngestDocument eqIndexTypeId(final Map source) { - return argThat(new IngestDocumentMatcher("_index", "_type", "_id", source)); - } - - private IngestDocument eqIndexTypeId(final Long version, final VersionType versionType, final Map source) { - return argThat(new IngestDocumentMatcher("_index", "_type", "_id", version, versionType, source)); - } - - private class IngestDocumentMatcher extends ArgumentMatcher { - - private final IngestDocument ingestDocument; - - IngestDocumentMatcher(String index, String type, String id, Map source) { - this.ingestDocument = new IngestDocument(index, type, id, null, null, null, source); - } - - IngestDocumentMatcher(String index, String type, String id, Long version, VersionType versionType, Map source) { - this.ingestDocument = new IngestDocument(index, type, id, null, version, versionType, source); - } - - @Override - public boolean matches(Object o) { - if (o.getClass() == IngestDocument.class) { - IngestDocument otherIngestDocument = (IngestDocument) o; - //ingest metadata will not be the same (timestamp differs every time) - return Objects.equals(ingestDocument.getSourceAndMetadata(), otherIngestDocument.getSourceAndMetadata()); - } - return false; - } - } -} diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java index 461873a3fe3d2..cafdbcfb44690 100644 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java @@ -47,9 +47,8 @@ public void testCreate() throws Exception { pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Arrays.asList(Collections.singletonMap("test", processorConfig0), Collections.singletonMap("test", 
processorConfig1))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -64,9 +63,8 @@ public void testCreateWithNoProcessorsField() throws Exception { Map pipelineConfig = new HashMap<>(); pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); - Pipeline.Factory factory = new Pipeline.Factory(); try { - factory.create("_id", pipelineConfig, Collections.emptyMap()); + Pipeline.create("_id", pipelineConfig, Collections.emptyMap()); fail("should fail, missing required [processors] field"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[processors] required property is missing")); @@ -78,8 +76,7 @@ public void testCreateWithEmptyProcessorsField() throws Exception { pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.emptyList()); - Pipeline.Factory factory = new Pipeline.Factory(); - Pipeline pipeline = factory.create("_id", pipelineConfig, null); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, null); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -93,9 +90,8 @@ public void testCreateWithPipelineOnFailure() throws Exception { pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); pipelineConfig.put(Pipeline.ON_FAILURE_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -112,9 +108,8 @@ public void testCreateWithPipelineEmptyOnFailure() throws Exception { pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); pipelineConfig.put(Pipeline.ON_FAILURE_KEY, Collections.emptyList()); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create("_id", pipelineConfig, processorRegistry)); + Exception e = expectThrows(ElasticsearchParseException.class, () -> Pipeline.create("_id", pipelineConfig, processorRegistry)); assertThat(e.getMessage(), equalTo("pipeline [_id] cannot have an empty on_failure option defined")); } @@ -125,9 +120,8 @@ public void testCreateWithPipelineEmptyOnFailureInProcessor() throws Exception { 
pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create("_id", pipelineConfig, processorRegistry)); + Exception e = expectThrows(ElasticsearchParseException.class, () -> Pipeline.create("_id", pipelineConfig, processorRegistry)); assertThat(e.getMessage(), equalTo("[on_failure] processors list cannot be empty")); } @@ -136,14 +130,13 @@ public void testCreateWithPipelineIgnoreFailure() throws Exception { processorConfig.put("ignore_failure", true); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline.Factory factory = new Pipeline.Factory(); Map pipelineConfig = new HashMap<>(); pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -162,9 +155,8 @@ public void testCreateUnusedProcessorOptions() throws Exception { pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create("_id", pipelineConfig, processorRegistry)); + Exception e = expectThrows(ElasticsearchParseException.class, () -> Pipeline.create("_id", pipelineConfig, processorRegistry)); assertThat(e.getMessage(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]")); } @@ -176,9 +168,8 @@ public void testCreateProcessorsWithOnFailureProperties() throws Exception { pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline.Factory factory = new Pipeline.Factory(); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline pipeline = factory.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java deleted file mode 100644 index d0ce465fc9ef8..0000000000000 --- 
a/server/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java +++ /dev/null @@ -1,377 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.ingest; - -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.Version; -import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -public class PipelineStoreTests extends ESTestCase { - - private PipelineStore store; - - @Before - public void init() throws Exception { - Map processorFactories = new HashMap<>(); - processorFactories.put("set", (factories, tag, config) -> { - String field = (String) config.remove("field"); - String value = (String) config.remove("value"); - return new Processor() { - @Override - public void execute(IngestDocument ingestDocument) throws Exception { - ingestDocument.setFieldValue(field, value); - } - - @Override - public String getType() { - return "set"; - } - - @Override - public String getTag() { - return tag; - } - }; - }); - processorFactories.put("remove", (factories, tag, config) -> { - String field = (String) config.remove("field"); - return new Processor() { - @Override - public void execute(IngestDocument ingestDocument) throws Exception { - ingestDocument.removeField(field); - } - - @Override - public String getType() { - return "remove"; - } - - @Override - public String getTag() { - return tag; - } - }; - }); - store = new PipelineStore(Settings.EMPTY, processorFactories); - } - - public void testUpdatePipelines() { - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - ClusterState previousClusterState = clusterState; - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.pipelines.size(), is(0)); - - PipelineConfiguration pipeline = new PipelineConfiguration( - "_id",new 
BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON - ); - IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", pipeline)); - clusterState = ClusterState.builder(clusterState) - .metaData(MetaData.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) - .build(); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.pipelines.size(), is(1)); - assertThat(store.pipelines.get("_id").getId(), equalTo("_id")); - assertThat(store.pipelines.get("_id").getDescription(), nullValue()); - assertThat(store.pipelines.get("_id").getProcessors().size(), equalTo(1)); - assertThat(store.pipelines.get("_id").getProcessors().get(0).getType(), equalTo("set")); - } - - public void testPut() { - String id = "_id"; - Pipeline pipeline = store.get(id); - assertThat(pipeline, nullValue()); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - - // add a new pipeline: - PutPipelineRequest putRequest = new PutPipelineRequest(id, new BytesArray("{\"processors\": []}"), XContentType.JSON); - ClusterState previousClusterState = clusterState; - clusterState = store.innerPut(putRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - pipeline = store.get(id); - assertThat(pipeline, notNullValue()); - assertThat(pipeline.getId(), equalTo(id)); - assertThat(pipeline.getDescription(), nullValue()); - assertThat(pipeline.getProcessors().size(), equalTo(0)); - - // overwrite existing pipeline: - putRequest = - new PutPipelineRequest(id, new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), XContentType.JSON); - previousClusterState = clusterState; - clusterState = store.innerPut(putRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - pipeline = store.get(id); - assertThat(pipeline, notNullValue()); - assertThat(pipeline.getId(), equalTo(id)); - assertThat(pipeline.getDescription(), equalTo("_description")); - assertThat(pipeline.getProcessors().size(), equalTo(0)); - } - - public void testPutWithErrorResponse() { - String id = "_id"; - Pipeline pipeline = store.get(id); - assertThat(pipeline, nullValue()); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - - PutPipelineRequest putRequest = - new PutPipelineRequest(id, new BytesArray("{\"description\": \"empty processors\"}"), XContentType.JSON); - ClusterState previousClusterState = clusterState; - clusterState = store.innerPut(putRequest, clusterState); - try { - store.innerUpdatePipelines(previousClusterState, clusterState); - fail("should fail"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), equalTo("[processors] required property is missing")); - } - pipeline = store.get(id); - assertNotNull(pipeline); - assertThat(pipeline.getId(), equalTo("_id")); - assertThat(pipeline.getDescription(), equalTo("this is a place holder pipeline, because pipeline with" + - " id [_id] could not be loaded")); - assertThat(pipeline.getProcessors().size(), equalTo(1)); - assertNull(pipeline.getProcessors().get(0).getTag()); - assertThat(pipeline.getProcessors().get(0).getType(), equalTo("unknown")); - } - - public void testDelete() { - PipelineConfiguration config = new PipelineConfiguration( - "_id",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON - ); - IngestMetadata ingestMetadata = new 
IngestMetadata(Collections.singletonMap("_id", config)); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - ClusterState previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() - .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("_id"), notNullValue()); - - // Delete pipeline: - DeletePipelineRequest deleteRequest = new DeletePipelineRequest("_id"); - previousClusterState = clusterState; - clusterState = store.innerDelete(deleteRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("_id"), nullValue()); - - // Delete existing pipeline: - try { - store.innerDelete(deleteRequest, clusterState); - fail("exception expected"); - } catch (ResourceNotFoundException e) { - assertThat(e.getMessage(), equalTo("pipeline [_id] is missing")); - } - } - - public void testDeleteUsingWildcard() { - HashMap pipelines = new HashMap<>(); - BytesArray definition = new BytesArray( - "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}" - ); - pipelines.put("p1", new PipelineConfiguration("p1", definition, XContentType.JSON)); - pipelines.put("p2", new PipelineConfiguration("p2", definition, XContentType.JSON)); - pipelines.put("q1", new PipelineConfiguration("q1", definition, XContentType.JSON)); - IngestMetadata ingestMetadata = new IngestMetadata(pipelines); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - ClusterState previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() - .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("p1"), notNullValue()); - assertThat(store.get("p2"), notNullValue()); - assertThat(store.get("q1"), notNullValue()); - - // Delete pipeline matching wildcard - DeletePipelineRequest deleteRequest = new DeletePipelineRequest("p*"); - previousClusterState = clusterState; - clusterState = store.innerDelete(deleteRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("p1"), nullValue()); - assertThat(store.get("p2"), nullValue()); - assertThat(store.get("q1"), notNullValue()); - - // Exception if we used name which does not exist - try { - store.innerDelete(new DeletePipelineRequest("unknown"), clusterState); - fail("exception expected"); - } catch (ResourceNotFoundException e) { - assertThat(e.getMessage(), equalTo("pipeline [unknown] is missing")); - } - - // match all wildcard works on last remaining pipeline - DeletePipelineRequest matchAllDeleteRequest = new DeletePipelineRequest("*"); - previousClusterState = clusterState; - clusterState = store.innerDelete(matchAllDeleteRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("p1"), nullValue()); - assertThat(store.get("p2"), nullValue()); - assertThat(store.get("q1"), nullValue()); - - // match all wildcard does not throw exception if none match - store.innerDelete(matchAllDeleteRequest, clusterState); - } - - public void testDeleteWithExistingUnmatchedPipelines() { - HashMap pipelines = new HashMap<>(); - BytesArray definition = new BytesArray( - "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}" - ); - pipelines.put("p1", 
new PipelineConfiguration("p1", definition, XContentType.JSON)); - IngestMetadata ingestMetadata = new IngestMetadata(pipelines); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - ClusterState previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder() - .putCustom(IngestMetadata.TYPE, ingestMetadata)).build(); - store.innerUpdatePipelines(previousClusterState, clusterState); - assertThat(store.get("p1"), notNullValue()); - - DeletePipelineRequest deleteRequest = new DeletePipelineRequest("z*"); - try { - store.innerDelete(deleteRequest, clusterState); - fail("exception expected"); - } catch (ResourceNotFoundException e) { - assertThat(e.getMessage(), equalTo("pipeline [z*] is missing")); - } - } - - public void testGetPipelines() { - Map configs = new HashMap<>(); - configs.put("_id1", new PipelineConfiguration( - "_id1", new BytesArray("{\"processors\": []}"), XContentType.JSON - )); - configs.put("_id2", new PipelineConfiguration( - "_id2", new BytesArray("{\"processors\": []}"), XContentType.JSON - )); - - assertThat(store.innerGetPipelines(null, "_id1").isEmpty(), is(true)); - - IngestMetadata ingestMetadata = new IngestMetadata(configs); - List pipelines = store.innerGetPipelines(ingestMetadata, "_id1"); - assertThat(pipelines.size(), equalTo(1)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - - pipelines = store.innerGetPipelines(ingestMetadata, "_id1", "_id2"); - assertThat(pipelines.size(), equalTo(2)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - assertThat(pipelines.get(1).getId(), equalTo("_id2")); - - pipelines = store.innerGetPipelines(ingestMetadata, "_id*"); - pipelines.sort((o1, o2) -> o1.getId().compareTo(o2.getId())); - assertThat(pipelines.size(), equalTo(2)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - assertThat(pipelines.get(1).getId(), equalTo("_id2")); - - // get all variants: (no IDs or '*') - pipelines = store.innerGetPipelines(ingestMetadata); - pipelines.sort((o1, o2) -> o1.getId().compareTo(o2.getId())); - assertThat(pipelines.size(), equalTo(2)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - assertThat(pipelines.get(1).getId(), equalTo("_id2")); - - pipelines = store.innerGetPipelines(ingestMetadata, "*"); - pipelines.sort((o1, o2) -> o1.getId().compareTo(o2.getId())); - assertThat(pipelines.size(), equalTo(2)); - assertThat(pipelines.get(0).getId(), equalTo("_id1")); - assertThat(pipelines.get(1).getId(), equalTo("_id2")); - } - - public void testCrud() throws Exception { - String id = "_id"; - Pipeline pipeline = store.get(id); - assertThat(pipeline, nullValue()); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty - - PutPipelineRequest putRequest = new PutPipelineRequest(id, - new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON); - ClusterState previousClusterState = clusterState; - clusterState = store.innerPut(putRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - pipeline = store.get(id); - assertThat(pipeline, notNullValue()); - assertThat(pipeline.getId(), equalTo(id)); - assertThat(pipeline.getDescription(), nullValue()); - assertThat(pipeline.getProcessors().size(), equalTo(1)); - assertThat(pipeline.getProcessors().get(0).getType(), equalTo("set")); - - DeletePipelineRequest deleteRequest = new DeletePipelineRequest(id); - previousClusterState = 
clusterState; - clusterState = store.innerDelete(deleteRequest, clusterState); - store.innerUpdatePipelines(previousClusterState, clusterState); - pipeline = store.get(id); - assertThat(pipeline, nullValue()); - } - - public void testValidate() throws Exception { - PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray( - "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\", \"tag\": \"tag1\"}}," + - "{\"remove\" : {\"field\": \"_field\", \"tag\": \"tag2\"}}]}"), - XContentType.JSON); - - DiscoveryNode node1 = new DiscoveryNode("_node_id1", buildNewFakeTransportAddress(), - emptyMap(), emptySet(), Version.CURRENT); - DiscoveryNode node2 = new DiscoveryNode("_node_id2", buildNewFakeTransportAddress(), - emptyMap(), emptySet(), Version.CURRENT); - Map ingestInfos = new HashMap<>(); - ingestInfos.put(node1, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); - ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set")))); - - ElasticsearchParseException e = - expectThrows(ElasticsearchParseException.class, () -> store.validatePipeline(ingestInfos, putRequest)); - assertEquals("Processor type [remove] is not installed on node [" + node2 + "]", e.getMessage()); - assertEquals("remove", e.getMetadata("es.processor_type").get(0)); - assertEquals("tag2", e.getMetadata("es.processor_tag").get(0)); - - ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); - store.validatePipeline(ingestInfos, putRequest); - } - - public void testValidateNoIngestInfo() throws Exception { - PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray( - "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON); - Exception e = expectThrows(IllegalStateException.class, () -> store.validatePipeline(Collections.emptyMap(), putRequest)); - assertEquals("Ingest info is empty", e.getMessage()); - - DiscoveryNode discoveryNode = new DiscoveryNode("_node_id", buildNewFakeTransportAddress(), - emptyMap(), emptySet(), Version.CURRENT); - IngestInfo ingestInfo = new IngestInfo(Collections.singletonList(new ProcessorInfo("set"))); - store.validatePipeline(Collections.singletonMap(discoveryNode, ingestInfo), putRequest); - } -} diff --git a/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java b/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java index c53d798f7b488..d413c0f0be229 100644 --- a/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.node.MockNode; @@ -32,6 +33,7 @@ import java.util.function.Function; import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasToString; public class IndexStorePluginTests extends ESTestCase { @@ -54,7 +56,30 @@ public Map> getIndexStoreFactories() } - public void testDuplicateIndexStoreProviders() { + public static class ConflictingStorePlugin extends Plugin implements IndexStorePlugin { + + public static final String TYPE; + + static { + TYPE = 
randomFrom(Arrays.asList(IndexModule.Type.values())).getSettingsKey();
+        }
+
+        @Override
+        public Map<String, Function<IndexSettings, IndexStore>> getIndexStoreFactories() {
+            return Collections.singletonMap(TYPE, IndexStore::new);
+        }
+
+    }
+
+    public void testIndexStoreFactoryConflictsWithBuiltInIndexStoreType() {
+        final Settings settings = Settings.builder().put("path.home", createTempDir()).build();
+        final IllegalStateException e = expectThrows(
+                IllegalStateException.class, () -> new MockNode(settings, Collections.singletonList(ConflictingStorePlugin.class)));
+        assertThat(e, hasToString(containsString(
+                "registered index store type [" + ConflictingStorePlugin.TYPE + "] conflicts with a built-in type")));
+    }
+
+    public void testDuplicateIndexStoreFactories() {
         final Settings settings = Settings.builder().put("path.home", createTempDir()).build();
         final IllegalStateException e = expectThrows(
                 IllegalStateException.class, () -> new MockNode(settings, Arrays.asList(BarStorePlugin.class, FooStorePlugin.class)));
diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java
index 5f1d1f612d7ad..f6649853eda10 100644
--- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java
@@ -590,10 +590,10 @@ public void testNonExtensibleDep() throws Exception {
     }
 
     public void testIncompatibleElasticsearchVersion() throws Exception {
-        PluginInfo info = new PluginInfo("my_plugin", "desc", "1.0", Version.V_5_0_0,
+        PluginInfo info = new PluginInfo("my_plugin", "desc", "1.0", Version.V_6_0_0,
             "1.8", "FakePlugin", Collections.emptyList(), false);
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginsService.verifyCompatibility(info));
-        assertThat(e.getMessage(), containsString("was built for Elasticsearch version 5.0.0"));
+        assertThat(e.getMessage(), containsString("was built for Elasticsearch version 6.0.0"));
     }
 
     public void testIncompatibleJavaVersion() throws Exception {
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java
index c50fb89f334af..ce45d222dd757 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java
@@ -67,7 +67,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created
     }
 
-    private Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
+    private Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0,
             Version.CURRENT);
 
     private IndexRequestBuilder indexCity(String idx, String name, String...
latLons) throws Exception { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index fc080dd0f04c4..971742aec2d04 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -65,7 +65,7 @@ protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created } - private Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + private Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); static ObjectIntMap expectedDocCountsForGeoHash = null; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 952eb22848e1a..a74734c622f8d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -321,7 +321,7 @@ public void testIssue11119() throws Exception { assertThat(response.getHits().getTotalHits(), equalTo(8L)); assertThat(response.getHits().getHits().length, equalTo(0)); - assertThat(response.getHits().getMaxScore(), equalTo(0f)); + assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -356,7 +356,7 @@ public void testIssue11119() throws Exception { assertThat(response.getHits().getTotalHits(), equalTo(8L)); assertThat(response.getHits().getHits().length, equalTo(0)); - assertThat(response.getHits().getMaxScore(), equalTo(0f)); + assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); diff --git a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index aea0243a399d4..45b6340ba6f46 100644 --- a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -810,6 +810,32 @@ public void testDocValueFields() throws Exception { equalTo(new BytesRef(new byte[] {42, 100}))); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + builder = client().prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("*field"); + searchResponse = builder.execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(1)); + fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", + "float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field", + "binary_field", "ip_field"))); + + assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2")); + 
assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValue(), equalTo((Object) 3L)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo((Object) 4L)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo((Object) 5.0)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); + dateField = searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(); + assertThat(dateField.toInstant().toEpochMilli(), equalTo(date.toInstant().toEpochMilli())); + assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), + equalTo(new BytesRef(new byte[] {42, 100}))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + builder = client().prepareSearch().setQuery(matchAllQuery()) .addDocValueField("text_field", "use_field_mapping") .addDocValueField("keyword_field", "use_field_mapping") @@ -977,6 +1003,70 @@ public void testDocValueFieldsWithFieldAlias() throws Exception { assertThat(fetchedDate, equalTo(date)); } + public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { + XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .field("enabled", false) + .endObject() + .startObject("properties") + .startObject("text_field") + .field("type", "text") + .field("fielddata", true) + .endObject() + .startObject("date_field") + .field("type", "date") + .field("format", "yyyy-MM-dd") + .endObject() + .startObject("text_field_alias") + .field("type", "alias") + .field("path", "text_field") + .endObject() + .startObject("date_field_alias") + .field("type", "alias") + .field("path", "date_field") + .endObject() + .endObject() + .endObject() + .endObject(); + assertAcked(prepareCreate("test").addMapping("type", mapping)); + ensureGreen("test"); + + ZonedDateTime date = ZonedDateTime.of(1990, 12, 29, 0, 0, 0, 0, ZoneOffset.UTC); + DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd", Locale.ROOT); + + index("test", "type", "1", "text_field", "foo", "date_field", formatter.format(date)); + refresh("test"); + + SearchRequestBuilder builder = client().prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("*alias", "use_field_mapping") + .addDocValueField("date_field"); + SearchResponse searchResponse = builder.execute().actionGet(); + + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 1); + SearchHit hit = searchResponse.getHits().getAt(0); + + Map fields = hit.getFields(); + assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); + + DocumentField textFieldAlias = fields.get("text_field_alias"); + assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); + assertThat(textFieldAlias.getValue(), equalTo("foo")); + + DocumentField dateFieldAlias = fields.get("date_field_alias"); + assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); + assertThat(dateFieldAlias.getValue(), + equalTo("1990-12-29")); + + DocumentField dateField = 
fields.get("date_field"); + assertThat(dateField.getName(), equalTo("date_field")); + + ZonedDateTime fetchedDate = dateField.getValue(); + assertThat(fetchedDate, equalTo(date)); + } + public void testStoredFieldsWithFieldAlias() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder() diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index a21893db3920f..0a860a636d4aa 100644 --- a/server/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -613,7 +613,7 @@ public void testDateWithoutOrigin() throws Exception { } public void testManyDocsLin() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = jsonBuilder().startObject().startObject("type").startObject("properties") .startObject("test").field("type", "text").endObject().startObject("date").field("type", "date") diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java index 12a64d80a1489..80b40042801b5 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java @@ -51,7 +51,7 @@ protected Collection> nodePlugins() { } public void testSimpleBoundingBoxTest() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") @@ -123,7 +123,7 @@ public void testSimpleBoundingBoxTest() throws Exception { } public void testLimit2BoundingBox() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") @@ -176,7 +176,7 @@ public void testLimit2BoundingBox() throws Exception { } public void testCompleteLonRange() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java index 5966ea6a49dcc..143fd611c3f5e 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java +++ 
b/server/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java @@ -101,7 +101,7 @@ static Double distanceScript(Map vars, Function> nodePlugins() { @Override protected void setupSuiteScopeCluster() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index 16365d829a83b..872267417c37d 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -67,6 +67,7 @@ import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; @@ -103,6 +104,7 @@ private void countTestCase(Query query, IndexReader reader, boolean shouldCollec final boolean rescore = QueryPhase.execute(context, searcher, checkCancelled -> {}); assertFalse(rescore); assertEquals(searcher.count(query), context.queryResult().topDocs().totalHits); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); } private void countTestCase(boolean withDeletions) throws Exception { @@ -172,11 +174,14 @@ public void testPostFilterDisablesCountOptimization() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertEquals(1, context.queryResult().topDocs().totalHits); + assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); contextSearcher = new IndexSearcher(reader); context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery())); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertEquals(0, context.queryResult().topDocs().totalHits); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); reader.close(); dir.close(); } @@ -205,13 +210,13 @@ public void testTerminateAfterWithFilter() throws Exception { context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", Integer.toString(i))))); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertEquals(1, context.queryResult().topDocs().totalHits); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); } reader.close(); dir.close(); } - public void testMinScoreDisablesCountOptimization() throws Exception { Directory dir = newDirectory(); final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); @@ -230,11 +235,13 @@ public void testMinScoreDisablesCountOptimization() throws Exception { context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertEquals(1, context.queryResult().topDocs().totalHits); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); contextSearcher = new IndexSearcher(reader); context.minimumScore(100); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertEquals(0, context.queryResult().topDocs().totalHits); + 
assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); reader.close(); dir.close(); } @@ -289,6 +296,7 @@ public void testInOrderScrollOptimization() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); assertNull(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(0)); assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs)); @@ -296,9 +304,11 @@ public void testInOrderScrollOptimization() throws Exception { contextSearcher = getAssertingEarlyTerminationSearcher(reader, size); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(size)); assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); assertThat(context.queryResult().topDocs().scoreDocs[0].doc, greaterThanOrEqualTo(size)); reader.close(); dir.close(); @@ -334,12 +344,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); context.setSize(0); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); } @@ -348,6 +360,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); } { @@ -360,6 +373,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); + assertThat(context.queryResult().topDocs().getMaxScore(), greaterThan(0f)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); context.setSize(0); @@ -367,6 +381,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); + assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); } { @@ -376,6 +391,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { 
         QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
         assertTrue(context.queryResult().terminatedEarly());
         assertThat(context.queryResult().topDocs().totalHits, equalTo(1L));
+        assertThat(context.queryResult().topDocs().getMaxScore(), greaterThan(0f));
         assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
         assertThat(collector.getTotalHits(), equalTo(1));
         context.queryCollectors().clear();
@@ -387,6 +403,7 @@ public void testTerminateAfterEarlyTermination() throws Exception {
         QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
         assertTrue(context.queryResult().terminatedEarly());
         assertThat(context.queryResult().topDocs().totalHits, equalTo(1L));
+        assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN));
         assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0));
         assertThat(collector.getTotalHits(), equalTo(1));
     }
@@ -539,19 +556,19 @@ public void testIndexSortScrollOptimization() throws Exception {
         dir.close();
     }

-    static IndexSearcher getAssertingEarlyTerminationSearcher(IndexReader reader, int size) {
+    private static IndexSearcher getAssertingEarlyTerminationSearcher(IndexReader reader, int size) {
         return new IndexSearcher(reader) {
             protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
-                final Collector in = new AssertingEalyTerminationFilterCollector(collector, size);
+                final Collector in = new AssertingEarlyTerminationFilterCollector(collector, size);
                 super.search(leaves, weight, in);
             }
         };
     }

-    private static class AssertingEalyTerminationFilterCollector extends FilterCollector {
+    private static class AssertingEarlyTerminationFilterCollector extends FilterCollector {
         private final int size;

-        AssertingEalyTerminationFilterCollector(Collector in, int size) {
+        AssertingEarlyTerminationFilterCollector(Collector in, int size) {
             super(in);
             this.size = size;
         }
diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryStringIT.java b/server/src/test/java/org/elasticsearch/search/query/QueryStringIT.java
index 5caab8c9dfec6..a90e98a38eefc 100644
--- a/server/src/test/java/org/elasticsearch/search/query/QueryStringIT.java
+++ b/server/src/test/java/org/elasticsearch/search/query/QueryStringIT.java
@@ -430,8 +430,8 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception {
         indexRequests.add(client().prepareIndex("test", "_doc", "1").setSource("f3", "text", "f2", "one"));
         indexRandom(true, false, indexRequests);
-        // The wildcard field matches aliases for both a text and boolean field.
-        // By default, the boolean field should be ignored when building the query.
+        // The wildcard field matches aliases for both a text and geo_point field.
+        // By default, the geo_point field should be ignored when building the query.
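[Editor's aside, not part of the patch] The rewritten comments above describe query_string's lenient expansion of wildcard field patterns: when a pattern such as f*_alias resolves to aliases backed by mixed mapped types, fields whose type cannot execute the query (here a geo_point) are dropped from the expansion rather than failing the whole request. A minimal sketch of the behaviour under test, assuming hypothetical alias names f3_alias (text-backed) and f2_alias (geo_point-backed) from this test's mapping, which is not shown in this hunk:

    // Only the text-backed alias contributes query clauses; the geo_point-backed
    // alias is silently ignored instead of raising a parsing error.
    SearchResponse response = client().prepareSearch("test")
            .setQuery(QueryBuilders.queryStringQuery("text").field("f*_alias"))
            .get();
    assertHitCount(response, 1); // doc "1" matches via the text field alone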
SearchResponse response = client().prepareSearch("test") .setQuery(queryStringQuery("text").field("f*_alias")) .execute().actionGet(); diff --git a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java index 965dcb3e8ccf1..e134b20c309f4 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java +++ b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java @@ -59,7 +59,7 @@ protected Collection> nodePlugins() { } public void testDistanceSortingMVFields() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") @@ -189,7 +189,7 @@ public void testDistanceSortingMVFields() throws Exception { // Regression bug: // https://github.com/elastic/elasticsearch/issues/2851 public void testDistanceSortingWithMissingGeoPoint() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") @@ -234,7 +234,7 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception { } public void testDistanceSortingNestedFields() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("company").startObject("properties") @@ -383,7 +383,7 @@ public void testDistanceSortingNestedFields() throws Exception { * Issue 3073 */ public void testGeoDistanceFilter() throws IOException { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); double lat = 40.720611; diff --git a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index 200043a6668ab..cac5fede848a4 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -70,7 +70,7 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce * 1 2 3 4 5 6 7 */ Version version = randomBoolean() ? 
Version.CURRENT - : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); + : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -136,7 +136,7 @@ public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedExc * d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5 */ Version version = randomBoolean() ? Version.CURRENT - : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); + : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -197,7 +197,7 @@ public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionExcept * 1 2 3 4 5 6 */ Version version = randomBoolean() ? Version.CURRENT - : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); + : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); diff --git a/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java index 3c099c32bde2d..bff5a2b122d2f 100644 --- a/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java @@ -159,7 +159,7 @@ public void onNodeDisconnected(DiscoveryNode node) { assertFalse(connection.isClosed()); assertTrue(connectionManager.nodeConnected(node)); assertSame(connection, connectionManager.getConnection(node)); - assertEquals(1, connectionManager.connectedNodeCount()); + assertEquals(1, connectionManager.size()); assertEquals(1, nodeConnectedCount.get()); assertEquals(0, nodeDisconnectedCount.get()); @@ -169,7 +169,7 @@ public void onNodeDisconnected(DiscoveryNode node) { connection.close(); } assertTrue(connection.isClosed()); - assertEquals(0, connectionManager.connectedNodeCount()); + assertEquals(0, connectionManager.size()); assertEquals(1, nodeConnectedCount.get()); assertEquals(1, nodeDisconnectedCount.get()); } @@ -205,7 +205,7 @@ public void onNodeDisconnected(DiscoveryNode node) { assertTrue(connection.isClosed()); assertFalse(connectionManager.nodeConnected(node)); expectThrows(NodeNotConnectedException.class, () -> connectionManager.getConnection(node)); - assertEquals(0, connectionManager.connectedNodeCount()); + assertEquals(0, connectionManager.size()); assertEquals(0, nodeConnectedCount.get()); assertEquals(0, nodeDisconnectedCount.get()); } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java index 8cfec0a07f910..34e22fd20de7f 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java +++ 
b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -81,13 +81,15 @@ public void testEnsureWeReconnect() throws Exception { try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { Semaphore semaphore = new Semaphore(1); service.start(); - service.addConnectionListener(new TransportConnectionListener() { - @Override - public void onNodeDisconnected(DiscoveryNode node) { - if (remoteNode.equals(node)) { - semaphore.release(); + service.getRemoteClusterService().getConnections().forEach(con -> { + con.getConnectionManager().addListener(new TransportConnectionListener() { + @Override + public void onNodeDisconnected(DiscoveryNode node) { + if (remoteNode.equals(node)) { + semaphore.release(); + } } - } + }); }); // this test is not perfect since we might reconnect concurrently but it will fail most of the time if we don't have // the right calls in place in the RemoteAwareClient @@ -95,7 +97,9 @@ public void onNodeDisconnected(DiscoveryNode node) { for (int i = 0; i < 10; i++) { semaphore.acquire(); try { - service.disconnectFromNode(remoteNode); + service.getRemoteClusterService().getConnections().forEach(con -> { + con.getConnectionManager().disconnectFromNode(remoteNode); + }); semaphore.acquire(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); Client client = remoteClusterService.getRemoteClusterClient(threadPool, "test"); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 3d0388ccfad96..e40486d63dc40 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -145,7 +145,7 @@ public static MockTransportService startTransport( } } - public void testLocalProfileIsUsedForLocalCluster() throws Exception { + public void testRemoteProfileIsUsedForLocalCluster() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { @@ -159,7 +159,7 @@ public void testLocalProfileIsUsedForLocalCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -175,9 +175,12 @@ public ClusterSearchShardsResponse read(StreamInput in) throws IOException { }); TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.BULK) .build(); - service.sendRequest(connection.getConnection(), ClusterSearchShardsAction.NAME, new ClusterSearchShardsRequest(), - options, futureHandler); - futureHandler.txGet(); + IllegalStateException ise = (IllegalStateException) expectThrows(SendRequestTransportException.class, () -> { + service.sendRequest(discoverableNode, + ClusterSearchShardsAction.NAME, new 
ClusterSearchShardsRequest(), options, futureHandler); + futureHandler.txGet(); + }).getCause(); + assertEquals(ise.getMessage(), "can't select channel size is 0 for types: [RECOVERY, BULK, STATE]"); } } } @@ -199,7 +202,7 @@ public void testRemoteProfileIsUsedForRemoteCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -255,7 +258,7 @@ public void testDiscoverSingleNode() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -284,7 +287,7 @@ public void testDiscoverSingleNodeWithIncompatibleSeed() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true)) { + seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, seedNodes); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -311,7 +314,7 @@ public void testNodeDisconnected() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -360,7 +363,8 @@ public void testFilterDiscoveredNodes() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, + n -> n.equals(rejectedNode) == false)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); if (rejectedNode.equals(seedNode)) { assertFalse(service.nodeConnected(seedNode)); @@ -399,7 +403,7 @@ public void testConnectWithIncompatibleTransports() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { expectThrows(Exception.class, () -> 
updateSeedNodes(connection, Arrays.asList(() -> seedNode))); assertFalse(service.nodeConnected(seedNode)); assertTrue(connection.assertNoRunningConnections()); @@ -462,7 +466,7 @@ public void close() { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { connection.addConnectedNode(seedNode); for (DiscoveryNode node : knownNodes) { final Transport.Connection transportConnection = connection.getConnection(node); @@ -505,7 +509,7 @@ public void run() { CountDownLatch listenerCalled = new CountDownLatch(1); AtomicReference exceptionReference = new AtomicReference<>(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { ActionListener listener = ActionListener.wrap(x -> { listenerCalled.countDown(); fail("expected exception"); @@ -542,7 +546,7 @@ public void testFetchShards() throws Exception { service.acceptIncomingRequests(); List> nodes = Collections.singletonList(() -> seedNode); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - nodes, service, Integer.MAX_VALUE, n -> true)) { + nodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { if (randomBoolean()) { updateSeedNodes(connection, nodes); } @@ -582,7 +586,7 @@ public void testFetchShardsThreadContextHeader() throws Exception { service.acceptIncomingRequests(); List> nodes = Collections.singletonList(() -> seedNode); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - nodes, service, Integer.MAX_VALUE, n -> true)) { + nodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { SearchRequest request = new SearchRequest("test-index"); Thread[] threads = new Thread[10]; for (int i = 0; i < threads.length; i++) { @@ -636,7 +640,7 @@ public void testFetchShardsSkipUnavailable() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Collections.singletonList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Collections.singletonList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { SearchRequest request = new SearchRequest("test-index"); ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") @@ -746,7 +750,7 @@ public void testTriggerUpdatesConcurrently() throws IOException, InterruptedExce service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true)) { + seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { int numThreads = randomIntBetween(4, 10); Thread[] threads = new Thread[numThreads]; CyclicBarrier barrier = new CyclicBarrier(numThreads); @@ -824,7 +828,7 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt service.start(); service.acceptIncomingRequests(); try 
(RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true)) { + seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { int numThreads = randomIntBetween(4, 10); Thread[] threads = new Thread[numThreads]; CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); @@ -913,7 +917,7 @@ public void testGetConnectionInfo() throws Exception { service.acceptIncomingRequests(); int maxNumConnections = randomIntBetween(1, 5); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, maxNumConnections, n -> true)) { + seedNodes, service, service.connectionManager(), maxNumConnections, n -> true)) { // test no nodes connected RemoteConnectionInfo remoteConnectionInfo = assertSerialization(connection.getConnectionInfo()); assertNotNull(remoteConnectionInfo); @@ -1060,7 +1064,7 @@ public void testEnsureConnected() throws IOException, InterruptedException { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { assertFalse(service.nodeConnected(seedNode)); assertFalse(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); @@ -1109,7 +1113,7 @@ public void testCollectNodes() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { if (randomBoolean()) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); } @@ -1157,7 +1161,7 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true)) { + seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { final int numGetThreads = randomIntBetween(4, 10); final Thread[] getThreads = new Thread[numGetThreads]; final int numModifyingThreads = randomIntBetween(4, 10); @@ -1247,7 +1251,7 @@ public void testClusterNameIsChecked() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList( () -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList( () -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -1327,7 +1331,7 @@ public void close() { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Collections.singletonList(() -> connectedNode), service, Integer.MAX_VALUE, n -> true)) { + Collections.singletonList(() -> connectedNode), service, service.getConnectionManager(), 
Integer.MAX_VALUE, n -> true)) { connection.addConnectedNode(connectedNode); for (int i = 0; i < 10; i++) { //always a direct connection as the remote node is already connected @@ -1335,9 +1339,9 @@ public void close() { assertSame(seedConnection, remoteConnection); } for (int i = 0; i < 10; i++) { - //always a direct connection as the remote node is already connected + // we don't use the transport service connection manager so we will get a proxy connection for the local node Transport.Connection remoteConnection = connection.getConnection(service.getLocalNode()); - assertThat(remoteConnection, not(instanceOf(RemoteClusterConnection.ProxyConnection.class))); + assertThat(remoteConnection, instanceOf(RemoteClusterConnection.ProxyConnection.class)); assertThat(remoteConnection.getNode(), equalTo(service.getLocalNode())); } for (int i = 0; i < 10; i++) { @@ -1369,7 +1373,7 @@ public void testLazyResolveTransportAddress() throws Exception { return seedNode; }; try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(seedSupplier), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { updateSeedNodes(connection, Arrays.asList(seedSupplier)); // Closing connections leads to RemoteClusterConnection.ConnectHandler.collectRemoteNodes // being called again so we try to resolve the same seed node's host twice diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index c94b1cbdef547..84a6ce54d1ed1 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -283,6 +283,7 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException { assertTrue(service.isRemoteClusterRegistered("cluster_2")); assertFalse(service.isRemoteNodeConnected("cluster_2", c2N1Node)); assertTrue(service.isRemoteNodeConnected("cluster_2", c2N2Node)); + assertEquals(0, transportService.getConnectionManager().size()); } } } @@ -347,6 +348,7 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { assertTrue(service.isRemoteClusterRegistered("cluster_2")); assertFalse(service.isRemoteNodeConnected("cluster_2", c2N1Node)); assertTrue(service.isRemoteNodeConnected("cluster_2", c2N2Node)); + assertEquals(0, transportService.getConnectionManager().size()); } } } @@ -579,14 +581,16 @@ public void testCollectSearchShards() throws Exception { } CountDownLatch disconnectedLatch = new CountDownLatch(numDisconnectedClusters); - service.addConnectionListener(new TransportConnectionListener() { - @Override - public void onNodeDisconnected(DiscoveryNode node) { - if (disconnectedNodes.remove(node)) { - disconnectedLatch.countDown(); + for (RemoteClusterConnection connection : remoteClusterService.getConnections()) { + connection.getConnectionManager().addListener(new TransportConnectionListener() { + @Override + public void onNodeDisconnected(DiscoveryNode node) { + if (disconnectedNodes.remove(node)) { + disconnectedLatch.countDown(); + } } - } - }); + }); + } for (DiscoveryNode disconnectedNode : disconnectedNodes) { service.addFailToSendNoConnectRule(disconnectedNode.getAddress()); @@ -664,6 +668,7 @@ public void onNodeDisconnected(DiscoveryNode node) { assertTrue(shardsResponse != 
ClusterSearchShardsResponse.EMPTY); } } + assertEquals(0, service.getConnectionManager().size()); } } } finally { @@ -816,10 +821,5 @@ public void testGetNodePredicatesCombination() { allRoles, Version.CURRENT); assertTrue(nodePredicate.test(node)); } - { - DiscoveryNode node = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "true"), - allRoles, Version.V_5_3_0); - assertFalse(nodePredicate.test(node)); - } } } diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index 0b6112eb51c90..0bf12ba82c821 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -156,19 +156,26 @@ public void testEnsureVersionCompatibility() { TcpTransport.ensureVersionCompatibility(VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT), Version.CURRENT, randomBoolean()); - TcpTransport.ensureVersionCompatibility(Version.fromString("5.0.0"), Version.fromString("6.0.0"), true); + TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), Version.fromString("7.0.0"), true); IllegalStateException ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("5.0.0"), Version.fromString("6.0.0"), false)); - assertEquals("Received message from unsupported version: [5.0.0] minimal compatible version is: [5.6.0]", ise.getMessage()); + TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), Version.fromString("7.0.0"), false)); + assertEquals("Received message from unsupported version: [6.0.0] minimal compatible version is: [6.5.0]", ise.getMessage()); + // For handshake we are compatible with N-2 + TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), Version.fromString("7.0.0"), true); ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("6.0.0"), true)); - assertEquals("Received handshake message from unsupported version: [2.3.0] minimal compatible version is: [5.6.0]", + TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), Version.fromString("7.0.0"), false)); + assertEquals("Received message from unsupported version: [5.6.0] minimal compatible version is: [6.5.0]", ise.getMessage()); ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("6.0.0"), false)); - assertEquals("Received message from unsupported version: [2.3.0] minimal compatible version is: [5.6.0]", + TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("7.0.0"), true)); + assertEquals("Received handshake message from unsupported version: [2.3.0] minimal compatible version is: [6.5.0]", + ise.getMessage()); + + ise = expectThrows(IllegalStateException.class, () -> + TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), Version.fromString("7.0.0"), false)); + assertEquals("Received message from unsupported version: [2.3.0] minimal compatible version is: [6.5.0]", ise.getMessage()); } diff --git a/server/src/test/resources/org/elasticsearch/search/query/all-query-index.json b/server/src/test/resources/org/elasticsearch/search/query/all-query-index.json index abdc11928229f..9ab8995813e33 100644 --- 
a/server/src/test/resources/org/elasticsearch/search/query/all-query-index.json
+++ b/server/src/test/resources/org/elasticsearch/search/query/all-query-index.json
@@ -46,10 +46,6 @@
         "format": "yyyy/MM/dd||epoch_millis"
       },
       "f_bool": {"type": "boolean"},
-      "f_bool_alias": {
-        "type": "alias",
-        "path": "f_bool"
-      },
       "f_byte": {"type": "byte"},
       "f_short": {"type": "short"},
       "f_int": {"type": "integer"},
@@ -60,6 +56,10 @@
       "f_binary": {"type": "binary"},
       "f_suggest": {"type": "completion"},
       "f_geop": {"type": "geo_point"},
+      "f_geop_alias": {
+        "type": "alias",
+        "path": "f_geop"
+      },
       "f_geos": {"type": "geo_shape"}
     }
   }
diff --git a/test/framework/build.gradle b/test/framework/build.gradle
index ab513a1b0bb21..8179e3d096a1f 100644
--- a/test/framework/build.gradle
+++ b/test/framework/build.gradle
@@ -16,9 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
-import org.elasticsearch.gradle.precommit.PrecommitTasks;
-
 dependencies {
   compile "org.elasticsearch.client:elasticsearch-rest-client:${version}"
   compile "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}"
@@ -41,9 +38,7 @@ compileTestJava.options.compilerArgs << '-Xlint:-rawtypes'

 // the main files are actually test files, so use the appropriate forbidden api sigs
 forbiddenApisMain {
-  signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
-    PrecommitTasks.getResource('/forbidden/es-all-signatures.txt'),
-    PrecommitTasks.getResource('/forbidden/es-test-signatures.txt')]
+  replaceSignatureFiles 'jdk-signatures', 'es-all-signatures', 'es-test-signatures'
 }

 // TODO: should we have licenses for our test deps?
diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
index 3c8f3497871b4..8c2af5c1d3c16 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
@@ -478,7 +478,7 @@ private InternalEngine createEngine(@Nullable IndexWriterFactory indexWriterFact
         }
         InternalEngine internalEngine = createInternalEngine(indexWriterFactory, localCheckpointTrackerSupplier, seqNoForOperation, config);
-        internalEngine.recoverFromTranslog();
+        internalEngine.recoverFromTranslog(Long.MAX_VALUE);
         return internalEngine;
     }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java
index 3e25b91e06d3e..5aeb30bfdbd5d 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java
@@ -16,6 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+
 package org.elasticsearch.test;

 import org.elasticsearch.common.Strings;
@@ -34,9 +35,17 @@ public abstract class AbstractSerializingTestCase indexFolders = new ArrayList<>();
-        try (DirectoryStream stream = Files.newDirectoryStream(dataDir.resolve("0/indices"),
-            (p) -> p.getFileName().toString().startsWith("extra") == false)) { // extra FS can break this...
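[Editor's aside, not part of the patch] The block being re-indented here uses the two-argument Files.newDirectoryStream(Path, DirectoryStream.Filter) overload: the filter is applied while the directory is iterated, so callers never observe entries such as the extra* directories that the test framework's "extra FS" machinery can create. A self-contained sketch of the same pattern, with hypothetical paths rather than this test's fixture:

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.ArrayList;
    import java.util.List;

    static List<Path> listIndexFolders(Path indicesDir) throws IOException {
        final List<Path> folders = new ArrayList<>();
        // The lambda implements DirectoryStream.Filter<Path>; entries whose name
        // starts with "extra" are skipped during iteration, not after the fact.
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(indicesDir,
                p -> p.getFileName().toString().startsWith("extra") == false)) {
            for (Path path : stream) {
                folders.add(path);
            }
        }
        return folders;
    }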
- for (final Path path : stream) { - indexFolders.add(path); - } + final List indexFolders = new ArrayList<>(); + try (DirectoryStream stream = Files.newDirectoryStream(dataDir.resolve("0/indices"), + (p) -> p.getFileName().toString().startsWith("extra") == false)) { // extra FS can break this... + for (final Path path : stream) { + indexFolders.add(path); } - assertThat(indexFolders.toString(), indexFolders.size(), equalTo(1)); - final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, - indexFolders.get(0)); - assertNotNull(indexMetaData); - assertThat(indexFolders.get(0).getFileName().toString(), equalTo(indexMetaData.getIndexUUID())); - assertThat(indexMetaData.getCreationVersion(), equalTo(version)); - return indexFolders.get(0); } + assertThat(indexFolders.toString(), indexFolders.size(), equalTo(1)); + final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, + indexFolders.get(0)); + assertNotNull(indexMetaData); + assertThat(indexFolders.get(0).getFileName().toString(), equalTo(indexMetaData.getIndexUUID())); + assertThat(indexMetaData.getCreationVersion(), equalTo(version)); + return indexFolders.get(0); } // randomly distribute the files from src over dests paths diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java index 82d8dbeebe6a1..6ecaae75a8ee2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/MatchAssertion.java @@ -32,6 +32,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThat; /** @@ -70,8 +71,13 @@ protected void doAssert(Object actualValue, Object expectedValue) { } } - assertNotNull("field [" + getField() + "] is null", actualValue); logger.trace("assert that [{}] matches [{}] (field [{}])", actualValue, expectedValue, getField()); + if (expectedValue == null) { + assertNull("field [" + getField() + "] should be null but was [" + actualValue + "]", actualValue); + return; + } + assertNotNull("field [" + getField() + "] is null", actualValue); + if (actualValue.getClass().equals(safeClass(expectedValue)) == false) { if (actualValue instanceof Number && expectedValue instanceof Number) { //Double 1.0 is equal to Integer 1 diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java index 60133a16a10af..132a07d5b7f48 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java @@ -51,7 +51,6 @@ import org.elasticsearch.transport.TransportStats; import java.io.IOException; -import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -66,11 +65,13 @@ import static org.apache.lucene.util.LuceneTestCase.rarely; -/** A transport class that doesn't send anything but rather captures all requests for inspection from tests */ +/** + * A transport class that doesn't send anything but rather captures all requests for 
inspection from tests + */ public class CapturingTransport implements Transport { private volatile Map requestHandlers = Collections.emptyMap(); - final Object requestHandlerMutex = new Object(); + private final Object requestHandlerMutex = new Object(); private final ResponseHandlers responseHandlers = new ResponseHandlers(); private TransportMessageListener listener; @@ -80,7 +81,7 @@ public static class CapturedRequest { public final String action; public final TransportRequest request; - public CapturedRequest(DiscoveryNode node, long requestId, String action, TransportRequest request) { + CapturedRequest(DiscoveryNode node, long requestId, String action, TransportRequest request) { this.node = node; this.requestId = requestId; this.action = action; @@ -96,41 +97,15 @@ public TransportService createCapturingTransportService(Settings settings, Threa @Nullable ClusterSettings clusterSettings, Set taskHeaders) { StubbableConnectionManager connectionManager = new StubbableConnectionManager(new ConnectionManager(settings, this, threadPool), settings, this, threadPool); - connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> true); - connectionManager.setDefaultConnectBehavior((cm, discoveryNode) -> new Connection() { - @Override - public DiscoveryNode getNode() { - return discoveryNode; - } - - @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws TransportException { - requests.put(requestId, Tuple.tuple(discoveryNode, action)); - capturedRequests.add(new CapturedRequest(discoveryNode, requestId, action, request)); - } - - @Override - public void addCloseListener(ActionListener listener) { - - } - - @Override - public boolean isClosed() { - return false; - } - - @Override - public void close() { - - } - }); + connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> nodeConnected(discoveryNode)); + connectionManager.setDefaultConnectBehavior((cm, discoveryNode) -> openConnection(discoveryNode, null)); return new TransportService(settings, this, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, connectionManager); - } - /** returns all requests captured so far. Doesn't clear the captured request list. See {@link #clear()} */ + /** + * returns all requests captured so far. Doesn't clear the captured request list. 
See {@link #clear()} + */ public CapturedRequest[] capturedRequests() { return capturedRequests.toArray(new CapturedRequest[0]); } @@ -178,12 +153,16 @@ public Map> getCapturedRequestsByTargetNodeAndClea return groupRequestsByTargetNode(requests); } - /** clears captured requests */ + /** + * clears captured requests + */ public void clear() { capturedRequests.clear(); } - /** simulate a response for the given requestId */ + /** + * simulate a response for the given requestId + */ public void handleResponse(final long requestId, final TransportResponse response) { responseHandlers.onResponseReceived(requestId, listener).handleResponse(response); } @@ -194,7 +173,7 @@ public void handleResponse(final long requestId, final TransportResponse respons * * @param requestId the id corresponding to the captured send * request - * @param t the failure to wrap + * @param t the failure to wrap */ public void handleLocalError(final long requestId, final Throwable t) { Tuple request = requests.get(requestId); @@ -208,7 +187,7 @@ public void handleLocalError(final long requestId, final Throwable t) { * * @param requestId the id corresponding to the captured send * request - * @param t the failure to wrap + * @param t the failure to wrap */ public void handleRemoteError(final long requestId, final Throwable t) { final RemoteTransportException remoteException; @@ -234,7 +213,7 @@ public void handleRemoteError(final long requestId, final Throwable t) { * * @param requestId the id corresponding to the captured send * request - * @param e the failure + * @param e the failure */ public void handleError(final long requestId, final TransportException e) { responseHandlers.onResponseReceived(requestId, listener).handleException(e); @@ -251,13 +230,11 @@ public DiscoveryNode getNode() { @Override public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws TransportException { - requests.put(requestId, Tuple.tuple(node, action)); - capturedRequests.add(new CapturedRequest(node, requestId, action, request)); + onSendRequest(requestId, action, request, node); } @Override public void addCloseListener(ActionListener listener) { - } @Override @@ -267,11 +244,19 @@ public boolean isClosed() { @Override public void close() { - } }; } + protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) { + requests.put(requestId, Tuple.tuple(node, action)); + capturedRequests.add(new CapturedRequest(node, requestId, action, request)); + } + + protected boolean nodeConnected(DiscoveryNode discoveryNode) { + return true; + } + @Override public TransportStats getStats() { throw new UnsupportedOperationException(); @@ -288,7 +273,7 @@ public Map profileBoundAddresses() { } @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { + public TransportAddress[] addressesFromString(String address, int perAddressLimit) { return new TransportAddress[0]; } @@ -299,22 +284,23 @@ public Lifecycle.State lifecycleState() { @Override public void addLifecycleListener(LifecycleListener listener) { - } @Override public void removeLifecycleListener(LifecycleListener listener) { - } @Override - public void start() {} + public void start() { + } @Override - public void stop() {} + public void stop() { + } @Override - public void close() {} + public void close() { + } @Override public List getLocalAddresses() { @@ -330,6 +316,7 @@ public void registerRequestHandler(RequestHan 
requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); } } + @Override public ResponseHandlers getResponseHandlers() { return responseHandlers; diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java index 486ccc805d055..012369feb839f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java @@ -120,8 +120,8 @@ public void disconnectFromNode(DiscoveryNode node) { } @Override - public int connectedNodeCount() { - return delegate.connectedNodeCount(); + public int size() { + return delegate.size(); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 4e59aaecf8de2..c485f9d45bda4 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -768,6 +768,7 @@ public void onAfter() { public void testNotifyOnShutdown() throws Exception { final CountDownLatch latch2 = new CountDownLatch(1); + final CountDownLatch latch3 = new CountDownLatch(1); try { serviceA.registerRequestHandler("internal:foobar", StringMessageRequest::new, ThreadPool.Names.GENERIC, (request, channel, task) -> { @@ -777,6 +778,8 @@ public void testNotifyOnShutdown() throws Exception { serviceB.stop(); } catch (Exception e) { fail(e.getMessage()); + } finally { + latch3.countDown(); } }); TransportFuture foobar = serviceB.submitRequest(nodeA, "internal:foobar", @@ -788,6 +791,7 @@ public void testNotifyOnShutdown() throws Exception { } catch (TransportException ex) { } + latch3.await(); } finally { serviceB.close(); // make sure we are fully closed here otherwise we might run into assertions down the road serviceA.disconnectFromNode(nodeB); @@ -2650,7 +2654,7 @@ public void testChannelCloseWhileConnecting() { public void onConnectionOpened(final Transport.Connection connection) { closeConnectionChannel(connection); try { - assertBusy(connection::isClosed); + assertBusy(() -> assertTrue(connection.isClosed())); } catch (Exception e) { throw new AssertionError(e); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index 5da8601a9f340..500cff893cb1f 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -152,7 +152,7 @@ public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exceptio parser = createParser(YamlXContent.yamlXContent, "\"First test section\": \n" + " - skip:\n" + - " version: \"5.0.0 - 5.2.0\"\n" + + " version: \"6.0.0 - 6.2.0\"\n" + " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" + " - do :\n" + " catch: missing\n" + @@ -167,9 +167,9 @@ public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exceptio assertThat(testSection, notNullValue()); assertThat(testSection.getName(), 
equalTo("First test section")); assertThat(testSection.getSkipSection(), notNullValue()); - assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0)); + assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); assertThat(testSection.getSkipSection().getUpperVersion(), - equalTo(Version.V_5_2_0)); + equalTo(Version.V_6_2_0)); assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(testSection.getExecutableSections().size(), equalTo(2)); DoSection doSection = (DoSection)testSection.getExecutableSections().get(0); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index 4c97eb453610e..71814593ad487 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -66,10 +66,10 @@ public void testParseTestSetupTeardownAndSections() throws Exception { " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" + "\n" + "---\n" + - "\"Get type mapping - pre 5.0\":\n" + + "\"Get type mapping - pre 6.0\":\n" + "\n" + " - skip:\n" + - " version: \"5.0.0 - \"\n" + + " version: \"6.0.0 - \"\n" + " reason: \"for newer versions the index name is always returned\"\n" + "\n" + " - do:\n" + @@ -97,7 +97,7 @@ public void testParseTestSetupTeardownAndSections() throws Exception { } else { assertThat(restTestSuite.getSetupSection().isEmpty(), equalTo(true)); } - + assertThat(restTestSuite.getTeardownSection(), notNullValue()); if (includeTeardown) { assertThat(restTestSuite.getTeardownSection().isEmpty(), equalTo(false)); @@ -131,12 +131,12 @@ public void testParseTestSetupTeardownAndSections() throws Exception { assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace")); assertThat(restTestSuite.getTestSections().get(1).getName(), - equalTo("Get type mapping - pre 5.0")); + equalTo("Get type mapping - pre 6.0")); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false)); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getReason(), equalTo("for newer versions the index name is always returned")); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), - equalTo(Version.V_5_0_0)); + equalTo(Version.V_6_0_0)); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getUpperVersion(), equalTo(Version.CURRENT)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class)); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/MatchAssertionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/MatchAssertionTests.java new file mode 100644 index 0000000000000..2bd7234744121 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/MatchAssertionTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.test.rest.yaml.section; + +import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.test.ESTestCase; + +public class MatchAssertionTests extends ESTestCase { + + public void testNull() { + XContentLocation xContentLocation = new XContentLocation(0, 0); + { + MatchAssertion matchAssertion = new MatchAssertion(xContentLocation, "field", null); + matchAssertion.doAssert(null, null); + expectThrows(AssertionError.class, () -> matchAssertion.doAssert("non-null", null)); + } + { + MatchAssertion matchAssertion = new MatchAssertion(xContentLocation, "field", "non-null"); + expectThrows(AssertionError.class, () -> matchAssertion.doAssert(null, "non-null")); + } + { + MatchAssertion matchAssertion = new MatchAssertion(xContentLocation, "field", "/exp/"); + expectThrows(AssertionError.class, () -> matchAssertion.doAssert(null, "/exp/")); + } + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java index cb9ab009b2594..e883e8e062af2 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java @@ -53,7 +53,7 @@ public void testParseSetupSection() throws Exception { public void testParseSetupAndSkipSectionNoSkip() throws Exception { parser = createParser(YamlXContent.yamlXContent, " - skip:\n" + - " version: \"5.0.0 - 5.3.0\"\n" + + " version: \"6.0.0 - 6.3.0\"\n" + " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" + " - do:\n" + " index1:\n" + @@ -74,9 +74,9 @@ public void testParseSetupAndSkipSectionNoSkip() throws Exception { assertThat(setupSection, notNullValue()); assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false)); assertThat(setupSection.getSkipSection(), notNullValue()); - assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0)); + assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); assertThat(setupSection.getSkipSection().getUpperVersion(), - equalTo(Version.V_5_3_0)); + equalTo(Version.V_6_3_0)); assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(setupSection.getDoSections().size(), equalTo(2)); assertThat(setupSection.getDoSections().get(0).getApiCallSection().getApi(), equalTo("index1")); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java index 3ab9583335e7c..e5e466a82cc18 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java +++ 
b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java @@ -34,17 +34,17 @@ public class SkipSectionTests extends AbstractClientYamlTestFragmentParserTestCase { public void testSkip() { - SkipSection section = new SkipSection("5.0.0 - 5.1.0", + SkipSection section = new SkipSection("6.0.0 - 6.1.0", randomBoolean() ? Collections.emptyList() : Collections.singletonList("warnings"), "foobar"); assertFalse(section.skip(Version.CURRENT)); - assertTrue(section.skip(Version.V_5_0_0)); - section = new SkipSection(randomBoolean() ? null : "5.0.0 - 5.1.0", + assertTrue(section.skip(Version.V_6_0_0)); + section = new SkipSection(randomBoolean() ? null : "6.0.0 - 6.1.0", Collections.singletonList("boom"), "foobar"); assertTrue(section.skip(Version.CURRENT)); } public void testMessage() { - SkipSection section = new SkipSection("5.0.0 - 5.1.0", + SkipSection section = new SkipSection("6.0.0 - 6.1.0", Collections.singletonList("warnings"), "foobar"); assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR")); section = new SkipSection(null, Collections.singletonList("warnings"), "foobar"); @@ -55,14 +55,14 @@ public void testMessage() { public void testParseSkipSectionVersionNoFeature() throws Exception { parser = createParser(YamlXContent.yamlXContent, - "version: \" - 5.1.1\"\n" + + "version: \" - 6.1.1\"\n" + "reason: Delete ignores the parent param" ); SkipSection skipSection = SkipSection.parse(parser); assertThat(skipSection, notNullValue()); assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion())); - assertThat(skipSection.getUpperVersion(), equalTo(Version.V_5_1_1)); + assertThat(skipSection.getUpperVersion(), equalTo(Version.V_6_1_1)); assertThat(skipSection.getFeatures().size(), equalTo(0)); assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param")); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java index 15ca1ec0096e3..07afa9f33b5b1 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java @@ -56,7 +56,7 @@ public void testParseTeardownSection() throws Exception { public void testParseWithSkip() throws Exception { parser = createParser(YamlXContent.yamlXContent, " - skip:\n" + - " version: \"5.0.0 - 5.3.0\"\n" + + " version: \"6.0.0 - 6.3.0\"\n" + " reason: \"there is a reason\"\n" + " - do:\n" + " delete:\n" + @@ -75,8 +75,8 @@ public void testParseWithSkip() throws Exception { TeardownSection section = TeardownSection.parse(parser); assertThat(section, notNullValue()); assertThat(section.getSkipSection().isEmpty(), equalTo(false)); - assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0)); - assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_5_3_0)); + assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); + assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_6_3_0)); assertThat(section.getSkipSection().getReason(), equalTo("there is a reason")); assertThat(section.getDoSections().size(), equalTo(2)); assertThat(section.getDoSections().get(0).getApiCallSection().getApi(), equalTo("delete")); diff --git a/test/logger-usage/build.gradle 
b/test/logger-usage/build.gradle index c16dab6a625c8..0f02283e53738 100644 --- a/test/logger-usage/build.gradle +++ b/test/logger-usage/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -29,7 +27,7 @@ loggerUsageCheck.enabled = false forbiddenApisMain.enabled = true // disabled by parent project forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] // does not depend on core, only jdk signatures + replaceSignatureFiles 'jdk-signatures' // does not depend on core, only jdk signatures } jarHell.enabled = true // disabled by parent project diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index aab8435558198..6cca05c4a0ef3 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -30,7 +30,8 @@ buildRestTests.expectedUnconvertedCandidates = [ ] dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') } @@ -309,7 +310,7 @@ setups['farequote_datafeed'] = setups['farequote_job'] + ''' "job_id":"farequote", "indexes":"farequote" } -''' +''' setups['ml_filter_safe_domains'] = ''' - do: xpack.ml.put_filter: @@ -749,3 +750,22 @@ setups['jacknich_user'] = ''' "metadata" : { "intelligence" : 7 } } ''' +setups['app0102_privileges'] = ''' + - do: + xpack.security.put_privileges: + body: > + { + "myapp": { + "read": { + "application": "myapp", + "name": "read", + "actions": [ + "data:read/*", + "action:login" ], + "metadata": { + "description": "Read access to myapp" + } + } + } + } +''' diff --git a/x-pack/docs/en/rest-api/defs.asciidoc b/x-pack/docs/en/rest-api/defs.asciidoc index 349ce343c7ae9..ed53929391bfe 100644 --- a/x-pack/docs/en/rest-api/defs.asciidoc +++ b/x-pack/docs/en/rest-api/defs.asciidoc @@ -2,8 +2,8 @@ [[ml-api-definitions]] == Definitions -These resource definitions are used in {ml} APIs and in {kib} advanced -job configuration options. +These resource definitions are used in {ml} and {security} APIs and in {kib} +advanced {ml} job configuration options. * <> * <> @@ -13,6 +13,7 @@ job configuration options. * <> * <> * <> +* <> * <> [role="xpack"] @@ -26,6 +27,8 @@ include::ml/jobresource.asciidoc[] [role="xpack"] include::ml/jobcounts.asciidoc[] [role="xpack"] +include::security/role-mapping-resources.asciidoc[] +[role="xpack"] include::ml/snapshotresource.asciidoc[] [role="xpack"] include::ml/resultsresource.asciidoc[] diff --git a/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc b/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc index f595d52ec10a1..115ef8fb04381 100644 --- a/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc +++ b/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc @@ -101,6 +101,7 @@ GET /sensor_rollup/_rollup_search -------------------------------------------------- // CONSOLE // TEST[setup:sensor_prefab_data] +// TEST[s/_rollup_search/_rollup_search?filter_path=took,timed_out,terminated_early,_shards,hits,aggregations/] The query is targeting the `sensor_rollup` data, since this contains the rollup data as configured in the job. 
A `max` aggregation has been used on the `temperature` field, yielding the following response: @@ -194,6 +195,7 @@ GET sensor-1,sensor_rollup/_rollup_search <1> -------------------------------------------------- // CONSOLE // TEST[continued] +// TEST[s/_rollup_search/_rollup_search?filter_path=took,timed_out,terminated_early,_shards,hits,aggregations/] <1> Note the URI now searches `sensor-1` and `sensor_rollup` at the same time When the search is executed, the Rollup Search endpoint will do two things: diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index f5b0c8eef667d..3ba582d5d7856 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -6,18 +6,39 @@ You can use the following APIs to perform {security} activities. * <> * <> -* <> -* <> +* <> * <> +[float] +[[security-api-app-privileges]] +=== Application privileges + +You can use the following APIs to add, update, retrieve, and remove application +privileges: + +* <> +* <> +* <> + +[float] +[[security-role-mapping-apis]] +=== Role mappings + +You can use the following APIs to add, remove, update, and retrieve role mappings: + +* <> +* <> +* <> + [float] [[security-role-apis]] === Roles -You can use the following APIs to add, remove, and retrieve roles in the native realm: +You can use the following APIs to add, remove, update, and retrieve roles in the native realm: -* <>, <> +* <> * <> +* <> * <> [float] @@ -27,34 +48,43 @@ You can use the following APIs to add, remove, and retrieve roles in the native You can use the following APIs to create and invalidate bearer tokens for access without requiring basic authentication: -* <>, <> +* <> +* <> [float] [[security-user-apis]] === Users -You can use the following APIs to create, read, update, and delete users from the +You can use the following APIs to add, remove, update, or retrieve users in the native realm: -* <>, <> -* <>, <> +* <> * <> +* <> +* <> +* <> * <> + +include::security/put-app-privileges.asciidoc[] include::security/authenticate.asciidoc[] include::security/change-password.asciidoc[] include::security/clear-cache.asciidoc[] +include::security/create-role-mappings.asciidoc[] include::security/clear-roles-cache.asciidoc[] include::security/create-roles.asciidoc[] include::security/create-users.asciidoc[] +include::security/delete-app-privileges.asciidoc[] +include::security/delete-role-mappings.asciidoc[] include::security/delete-roles.asciidoc[] include::security/delete-tokens.asciidoc[] include::security/delete-users.asciidoc[] include::security/disable-users.asciidoc[] include::security/enable-users.asciidoc[] +include::security/get-app-privileges.asciidoc[] +include::security/get-role-mappings.asciidoc[] include::security/get-roles.asciidoc[] include::security/get-tokens.asciidoc[] include::security/get-users.asciidoc[] -include::security/privileges.asciidoc[] -include::security/role-mapping.asciidoc[] +include::security/has-privileges.asciidoc[] include::security/ssl.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc new file mode 100644 index 0000000000000..87dedbba4f7cf --- /dev/null +++ b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc @@ -0,0 +1,239 @@ +[role="xpack"] +[[security-api-put-role-mapping]] +=== Create or update role mappings API + +Creates and updates role mappings. 
+ +==== Request + +`POST /_xpack/security/role_mapping/<name>` + + +`PUT /_xpack/security/role_mapping/<name>` + + +==== Description + +Role mappings define which roles are assigned to each user. Each mapping has +_rules_ that identify users and a list of _roles_ that are +granted to those users. + +NOTE: This API does not create roles. Rather, it maps users to existing roles. +Roles can be created by using <<security-api-put-role,the create or update roles API>> or +{stack-ov}/defining-roles.html#roles-management-file[roles files]. + +For more information, see +{stack-ov}/mapping-roles.html[Mapping users and groups to roles]. + + +==== Path Parameters + +`name`:: + (string) The distinct name that identifies the role mapping. The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. + + +==== Request Body + +The following parameters can be specified in the body of a PUT or POST request +and pertain to adding a role mapping: + +`enabled` (required):: +(boolean) Mappings that have `enabled` set to `false` are ignored when role +mapping is performed. + +`metadata`:: +(object) Additional metadata that helps define which roles are assigned to each +user. Within the `metadata` object, keys beginning with `_` are reserved for +system usage. + +`roles` (required):: +(list) A list of roles that are granted to the users that match the role mapping +rules. + +`rules` (required):: +(object) The rules that determine which users should be matched by the mapping. +A rule is a logical condition that is expressed by using a JSON DSL. See +<<role-mapping-resources>>. + + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +The following example assigns the "user" role to all users: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping1 +{ + "roles": [ "user"], + "enabled": true, <1> + "rules": { + "field" : { "username" : "*" } + }, + "metadata" : { <2> + "version" : 1 + } +} +------------------------------------------------------------ +// CONSOLE +<1> Mappings that have `enabled` set to `false` are ignored when role mapping + is performed. +<2> Metadata is optional. + +A successful call returns a JSON structure that shows whether the mapping has +been created or updated. + +[source,js] +-------------------------------------------------- +{ + "role_mapping" : { + "created" : true <1> + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> When an existing mapping is updated, `created` is set to false.
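+ +Submitting the same request again for a mapping that already exists performs an update rather than a creation, so a second identical `POST` for `mapping1` would be expected to return a response like the following sketch (an illustrative re-run, not a tested snippet): + +[source,js] +-------------------------------------------------- +{ + "role_mapping" : { + "created" : false + } +} +-------------------------------------------------- +// NOTCONSOLE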
+ +The following example assigns the "user" and "admin" roles to specific users: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/role_mapping/mapping2 +{ + "roles": [ "user", "admin" ], + "enabled": true, + "rules": { + "field" : { "username" : [ "esadmin01", "esadmin02" ] } + } +} +-------------------------------------------------- +// CONSOLE + +The following example matches any user where either the username is `esadmin` +or the user is in the `cn=admins,dc=example,dc=com` group: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping3 +{ + "roles": [ "superuser" ], + "enabled": true, + "rules": { + "any": [ + { + "field": { + "username": "esadmin" + } + }, + { + "field": { + "groups": "cn=admins,dc=example,dc=com" + } + } + ] + } +} +------------------------------------------------------------ +// CONSOLE + +The following example matches users who authenticated against a specific realm: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping4 +{ + "roles": [ "ldap-user" ], + "enabled": true, + "rules": { + "field" : { "realm.name" : "ldap1" } + } +} +------------------------------------------------------------ +// CONSOLE + +The following example matches users within a specific LDAP sub-tree: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping5 +{ + "roles": [ "example-user" ], + "enabled": true, + "rules": { + "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } + } +} +------------------------------------------------------------ +// CONSOLE + +The following example matches users within a particular LDAP sub-tree in a +specific realm: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping6 +{ + "roles": [ "ldap-example-user" ], + "enabled": true, + "rules": { + "all": [ + { "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } }, + { "field" : { "realm.name" : "ldap1" } } + ] + } +} +------------------------------------------------------------ +// CONSOLE + +The rules can be more complex and include wildcard matching.
For example, the +following mapping matches any user where *all* of these conditions are met: + +- the _Distinguished Name_ matches the pattern `*,ou=admin,dc=example,dc=com`, + or the username is `es-admin`, or the username is `es-system` +- the user is in the `cn=people,dc=example,dc=com` group +- the user does not have a `terminated_date` + + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping7 +{ + "roles": [ "superuser" ], + "enabled": true, + "rules": { + "all": [ + { + "any": [ + { + "field": { + "dn": "*,ou=admin,dc=example,dc=com" + } + }, + { + "field": { + "username": [ "es-admin", "es-system" ] + } + } + ] + }, + { + "field": { + "groups": "cn=people,dc=example,dc=com" + } + }, + { + "except": { + "field": { + "metadata.terminated_date": null + } + } + } + ] + } +} +------------------------------------------------------------ +// CONSOLE diff --git a/x-pack/docs/en/rest-api/security/create-roles.asciidoc b/x-pack/docs/en/rest-api/security/create-roles.asciidoc index 749676b4e8360..fc3c613557ef0 100644 --- a/x-pack/docs/en/rest-api/security/create-roles.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-roles.asciidoc @@ -1,8 +1,8 @@ [role="xpack"] [[security-api-put-role]] -=== Create roles API +=== Create or update roles API -Adds roles in the native realm. +Adds and updates roles in the native realm. ==== Request @@ -29,9 +29,20 @@ file-based role management. For more information about the native realm, see The following parameters can be specified in the body of a PUT or POST request and pertain to adding a role: +`applications`:: (list) A list of application privilege entries. +`application` (required)::: (string) The name of the application to which this entry applies. +`privileges`::: (list) A list of strings, where each element is the name of an application +privilege or action. +`resources`::: (list) A list of resources to which the privileges are applied. + `cluster`:: (list) A list of cluster privileges. These privileges define the cluster level actions that users with this role are able to execute. +`global`:: (object) An object defining global privileges. A global privilege is +a form of cluster privilege that is request-aware. Support for global privileges +is currently limited to the management of application privileges. +This field is optional. + `indices`:: (list) A list of indices permissions entries. `field_security`::: (list) The document fields that the owners of the role have read access to. For more information, see @@ -79,6 +90,13 @@ POST /_xpack/security/role/my_admin_role "query": "{\"match\": {\"title\": \"foo\"}}" // optional } ], + "applications": [ + { + "application": "myapp", + "privileges": [ "admin", "read" ], + "resources": [ "*" ] + } + ], "run_as": [ "other_user" ], // optional "metadata" : { // optional "version" : 1 diff --git a/x-pack/docs/en/rest-api/security/create-users.asciidoc b/x-pack/docs/en/rest-api/security/create-users.asciidoc index 5015d0401c223..91171b0e57eb4 100644 --- a/x-pack/docs/en/rest-api/security/create-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-users.asciidoc @@ -1,8 +1,8 @@ [role="xpack"] [[security-api-put-user]] -=== Create users API +=== Create or update users API -Creates and updates users in the native realm. These users are commonly referred +Adds and updates users in the native realm. These users are commonly referred
diff --git a/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc new file mode 100644 index 0000000000000..d7f001721b1fd --- /dev/null +++ b/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc @@ -0,0 +1,59 @@ +[role="xpack"] +[[security-api-delete-privilege]] +=== Delete application privileges API + +Removes +{stack-ov}/security-privileges.html#application-privileges[application privileges]. + +==== Request + +`DELETE /_xpack/security/privilege//` + +//==== Description + +==== Path Parameters + +`application` (required):: + (string) The name of the application. Application privileges are always + associated with exactly one application. + +`privilege` (required):: + (string) The name of the privilege. + +// ==== Request Body + +==== Authorization + +To use this API, you must have either: + +- the `manage_security` cluster privilege (or a greater privilege such as `all`); _or_ +- the _"Manage Application Privileges"_ global privilege for the application being referenced + in the request + +==== Examples + +The following example deletes the `read` application privilege from the +`myapp` application: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/privilege/myapp/read +-------------------------------------------------- +// CONSOLE +// TEST[setup:app0102_privileges] + +If the role is successfully deleted, the request returns `{"found": true}`. +Otherwise, `found` is set to false. + +[source,js] +-------------------------------------------------- +{ + "myapp": { + "read": { + "found" : true + } + } +} +-------------------------------------------------- +// TESTRESPONSE + diff --git a/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc new file mode 100644 index 0000000000000..dc9bf2ba10904 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc @@ -0,0 +1,50 @@ +[role="xpack"] +[[security-api-delete-role-mapping]] +=== Delete role mappings API + +Removes role mappings. + +==== Request + +`DELETE /_xpack/security/role_mapping/` + +==== Description + +Role mappings define which roles are assigned to each user. For more information, +see {stack-ov}/mapping-roles.html[Mapping users and groups to roles]. + +==== Path Parameters + +`name`:: + (string) The distinct name that identifies the role mapping. The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. + +//==== Request Body + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +The following example delete a role mapping: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/role_mapping/mapping1 +-------------------------------------------------- +// CONSOLE +// TEST[setup:role_mapping] + +If the mapping is successfully deleted, the request returns `{"found": true}`. +Otherwise, `found` is set to false. 
+ +[source,js] +-------------------------------------------------- +{ + "found" : true +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc new file mode 100644 index 0000000000000..5412a4bdceb83 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc @@ -0,0 +1,94 @@ +[role="xpack"] +[[security-api-get-privileges]] +=== Get application privileges API + +Retrieves +{stack-ov}/security-privileges.html#application-privileges[application privileges]. + +==== Request + +`GET /_xpack/security/privilege` + + +`GET /_xpack/security/privilege/<application>` + + +`GET /_xpack/security/privilege/<application>/<privilege>` + + +==== Description + +To check a user's application privileges, use the +<<security-api-has-privileges,has privileges API>>. + + +==== Path Parameters + +`application`:: + (string) The name of the application. Application privileges are always + associated with exactly one application. + If you do not specify this parameter, the API returns information about all + privileges for all applications. + +`privilege`:: + (string) The name of the privilege. If you do not specify this parameter, the + API returns information about all privileges for the requested application. + +//==== Request Body + +==== Authorization + +To use this API, you must have either: + +- the `manage_security` cluster privilege (or a greater privilege such as `all`); _or_ +- the _"Manage Application Privileges"_ global privilege for the application being referenced + in the request + +==== Examples + +The following example retrieves information about the `read` privilege for the +`myapp` application: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/privilege/myapp/read +-------------------------------------------------- +// CONSOLE +// TEST[setup:app0102_privileges] + +A successful call returns an object keyed by application name and privilege +name. If the privilege is not defined, the request responds with a 404 status. + +[source,js] +-------------------------------------------------- +{ + "myapp": { + "read": { + "application": "myapp", + "name": "read", + "actions": [ + "data:read/*", + "action:login" + ], + "metadata": { + "description": "Read access to myapp" + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE + +To retrieve all privileges for an application, omit the privilege name: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/privilege/myapp/ +-------------------------------------------------- +// CONSOLE + +To retrieve every privilege, omit both the application and privilege names: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/privilege/ +-------------------------------------------------- +// CONSOLE diff --git a/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc new file mode 100644 index 0000000000000..7abe34b32f560 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc @@ -0,0 +1,74 @@ +[role="xpack"] +[[security-api-get-role-mapping]] +=== Get role mappings API + +Retrieves role mappings. + +==== Request + +`GET /_xpack/security/role_mapping` + + +`GET /_xpack/security/role_mapping/<name>` + +==== Description + +Role mappings define which roles are assigned to each user.
For more information, +see {stack-ov}/mapping-roles.html[Mapping users and groups to roles]. + +==== Path Parameters + +`name`:: + (string) The distinct name that identifies the role mapping. The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. You can specify multiple + mapping names as a comma-separated list. If you do not specify this + parameter, the API returns information about all role mappings. + +//==== Request Body + +==== Results + +A successful call retrieves an object, where the keys are the +names of the requested mappings, and the values are the JSON representation of +those mappings. For more information, see +<<role-mapping-resources>>. + +If there is no mapping with the requested name, the +response will have status code `404`. + + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +The following example retrieves information about the `mapping1` role mapping: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/role_mapping/mapping1 +-------------------------------------------------- +// CONSOLE +// TEST[setup:role_mapping] + + +[source,js] +-------------------------------------------------- +{ + "mapping1": { + "enabled": true, + "roles": [ + "user" + ], + "rules": { + "field": { + "username": "*" + } + }, + "metadata": {} + } +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/privileges.asciidoc b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc similarity index 69% rename from x-pack/docs/en/rest-api/security/privileges.asciidoc rename to x-pack/docs/en/rest-api/security/has-privileges.asciidoc index adaf27e97073e..cae1bc4d303fe 100644 --- a/x-pack/docs/en/rest-api/security/privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] -[[security-api-privileges]] -=== Privilege APIs +[[security-api-has-privileges]] +=== Has Privileges API [[security-api-has-privilege]] @@ -15,7 +15,7 @@ a specified list of privileges. ==== Description For a list of the privileges that you can specify in this API, -see {xpack-ref}/security-privileges.html[Security Privileges]. +see {stack-ov}/security-privileges.html[Security privileges]. A successful call returns a JSON structure that shows whether each specified privilege is assigned to the user. @@ -30,6 +30,14 @@ privilege is assigned to the user. `privileges`::: (list) A list of the privileges that you want to check for the specified indices. +`application`:: +`application`::: (string) The name of the application. +`privileges`::: (list) A list of the privileges that you want to check for the +specified resources. May be either application privilege names, or the names of +actions that are granted by those privileges. +`resources`::: (list) A list of resource names against which the privileges +should be checked. + ==== Authorization All users can use this API, but only to determine their own privileges.
@@ -41,7 +49,7 @@ more information, see ==== Examples The following example checks whether the current user has a specific set of -cluster and indices privileges: +cluster, index, and application privileges: [source,js] -------------------------------------------------- @@ -57,6 +65,13 @@ GET _xpack/security/user/_has_privileges "names": [ "inventory" ], "privileges" : [ "read", "write" ] } + ], + "application": [ + { + "application": "inventory_manager", + "privileges" : [ "read", "data:write/inventory" ], + "resources" : [ "product/1852563" ] + } ] } -------------------------------------------------- @@ -85,7 +100,14 @@ The following example output indicates which privileges the "rdeniro" user has: "write" : false } }, - "application" : {} + "application" : { + "inventory_manager" : { + "product/1852563" : { + "read": false, + "data:write/inventory": false + } + } + } } -------------------------------------------------- // TESTRESPONSE[s/"rdeniro"/"$body.username"/] diff --git a/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc new file mode 100644 index 0000000000000..f715a80014bea --- /dev/null +++ b/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc @@ -0,0 +1,163 @@ +[role="xpack"] +[[security-api-put-privileges]] +=== Create or update application privileges API + +Adds or updates +{stack-ov}/security-privileges.html#application-privileges[application privileges]. + +==== Request + +`POST /_xpack/security/privilege` + + +`PUT /_xpack/security/privilege` + + +==== Description + +This API creates or updates privileges. To remove privileges, use the +<<security-api-delete-privilege,delete application privileges API>>. + +For more information, see +{stack-ov}/defining-roles.html#roles-application-priv[Application privileges]. + +To check a user's application privileges, use the +<<security-api-has-privileges,has privileges API>>. + +==== Request Body + +The body is a JSON object where the names of the fields are the application +names and the value of each field is an object. The fields in this inner +object are the names of the privileges and each value is a JSON object that +includes the following fields: + +`actions`:: (array-of-string) A list of action names that are granted by this +privilege. This field must exist and cannot be an empty array. + +`metadata`:: (object) Optional metadata. Within the `metadata` object, keys +that begin with `_` are reserved for system usage. + + +[[security-api-app-privileges-validation]] +==== Validation + +Application Names:: + Application names are formed from a _prefix_ and an optional _suffix_, which + must conform to the following rules: + * The prefix must begin with a lowercase ASCII letter + * The prefix must contain only ASCII letters or digits + * The prefix must be at least 3 characters long + * If the suffix exists, it must begin with either `-` or `_` + * The suffix cannot contain any of the following characters: + `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,` + * No part of the name can contain whitespace.
+ +Privilege Names:: + Privilege names must begin with a lowercase ASCII letter and must contain + only ASCII letters and digits along with the characters `_`, `-` and `.` + +Action Names:: + Action names can contain any number of printable ASCII characters and must + contain at least one of the following characters: `/`, `*`, `:` + +==== Authorization + +To use this API, you must have either: + +- the `manage_security` cluster privilege (or a greater privilege such as `all`); _or_ +- the _"Manage Application Privileges"_ global privilege for the application being referenced + in the request + +==== Examples + +To add a single privilege, submit a PUT or POST request to the +`/_xpack/security/privilege/<application>/<privilege>` endpoint. For example: + +[source,js] +-------------------------------------------------- +PUT /_xpack/security/privilege +{ + "myapp": { + "read": { + "actions": [ <1> + "data:read/*" , <2> + "action:login" ], + "metadata": { <3> + "description": "Read access to myapp" + } + } + } +} +-------------------------------------------------- +// CONSOLE +<1> These strings have significance within the "myapp" application. {es} does not + assign any meaning to them. +<2> The use of a wildcard here (`*`) means that this privilege grants access to + all actions that start with `data:read/`. {es} does not assign any meaning + to these actions. However, if the request includes an application privilege + such as `data:read/users` or `data:read/settings`, the + <<security-api-has-privileges,has privileges API>> respects the use of a + wildcard and returns `true`. +<3> The metadata object is optional. + +A successful call returns a JSON structure that shows whether the privilege has +been created or updated. + +[source,js] +-------------------------------------------------- +{ + "myapp": { + "read": { + "created": true <1> + } + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> When an existing privilege is updated, `created` is set to false. + +To add multiple privileges, submit a PUT or POST request to the +`/_xpack/security/privilege/` endpoint. For example: + +[source,js] +-------------------------------------------------- +PUT /_xpack/security/privilege +{ + "app01": { + "read": { + "actions": [ "action:login", "data:read/*" ] + }, + "write": { + "actions": [ "action:login", "data:write/*" ] + } + }, + "app02": { + "all": { + "actions": [ "*" ] + } + } +} +-------------------------------------------------- +// CONSOLE + +A successful call returns a JSON structure that shows whether the privileges +have been created or updated. + +[source,js] +-------------------------------------------------- +{ + "app02": { + "all": { + "created": true + } + }, + "app01": { + "read": { + "created": true + }, + "write": { + "created": true + } + } +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc new file mode 100644 index 0000000000000..be4afc57a1a54 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc @@ -0,0 +1,89 @@ +[role="xpack"] +[[role-mapping-resources]] +=== Role mapping resources + +A role mapping resource has the following properties: + +`enabled`:: +(boolean) Mappings that have `enabled` set to `false` are ignored when role +mapping is performed. + +`metadata`:: +(object) Additional metadata that helps define which roles are assigned to each +user. Within the `metadata` object, keys beginning with `_` are reserved for +system usage.
+ +`roles`:: +(list) A list of roles that are granted to the users that match the role mapping +rules. + +`rules`:: +(object) The rules that determine which users should be matched by the mapping. +A rule is a logical condition that is expressed by using a JSON DSL. The DSL supports the following rule types: +`any`::: +(array of rules) If *any* of its children are true, it evaluates to `true`. +`all`::: +(array of rules) If *all* of its children are true, it evaluates to `true`. +`field`::: +(object) See <<mapping-roles-rule-field>>. +`except`::: +(object) A single rule as an object. Only valid as a child of an `all` rule. If +its child is `false`, the `except` is `true`. + + +[float] +[[mapping-roles-rule-field]] +==== Field rules + +The `field` rule is the primary building block for a role mapping expression. +It takes a single object as its value and that object must contain a single +member with key _F_ and value _V_. The field rule looks up the value of _F_ +within the user object and then tests whether the user value _matches_ the +provided value _V_. + +The value specified in the field rule can be one of the following types: +[cols="2,5,3m"] +|======================= +| Type | Description | Example + +| Simple String | Exactly matches the provided value. | "esadmin" +| Wildcard String | Matches the provided value using a wildcard. | "*,dc=example,dc=com" +| Regular Expression | Matches the provided value using a + {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp]. | "/.\*-admin[0-9]*/" +| Number | Matches an equivalent numerical value. | 7 +| Null | Matches a null or missing value. | null +| Array | Tests each element in the array in + accordance with the above definitions. + If _any_ of the elements match, the match is successful. | ["admin", "operator"] +|======================= + +[float] +===== User fields + +The _user object_ against which rules are evaluated has the following fields: + +`username`:: +(string) The username by which {security} knows this user. For example, `"username": "jsmith"`. +`dn`:: +(string) The _Distinguished Name_ of the user. For example, `"dn": "cn=jsmith,ou=users,dc=example,dc=com"`. +`groups`:: +(array of strings) The groups to which the user belongs. For example, `"groups" : [ "cn=admin,ou=groups,dc=example,dc=com", "cn=esusers,ou=groups,dc=example,dc=com" ]`. +`metadata`:: +(object) Additional metadata for the user. For example, `"metadata": { "cn": "John Smith" }`. +`realm`:: +(object) The realm that authenticated the user. The only field in this object is the realm name. For example, `"realm": { "name": "ldap1" }`. + +The `groups` field is multi-valued; a user can belong to many groups. When a +`field` rule is applied against a multi-valued field, it is considered to match +if _at least one_ of the member values matches. For example, the following rule +matches any user who is a member of the `admin` group, regardless of any +other groups they belong to: + +[source, js] +------------------------------------------------------------ +{ "field" : { "groups" : "admin" } } +------------------------------------------------------------ +// NOTCONSOLE + +For additional realm-specific details, see +{stack-ov}/mapping-roles.html#ldap-role-mapping[Mapping Users and Groups to Roles].
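+ +To illustrate how these rule types compose, the following sketch (using placeholder values; `ldap1` and `guest` stand in for a real realm name and username) matches every user from the `ldap1` realm except the `guest` account: + +[source, js] +------------------------------------------------------------ +{ + "all": [ + { "field" : { "realm.name" : "ldap1" } }, + { "except" : { "field" : { "username" : "guest" } } } + ] +} +------------------------------------------------------------ +// NOTCONSOLE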
diff --git a/x-pack/docs/en/rest-api/security/role-mapping.asciidoc b/x-pack/docs/en/rest-api/security/role-mapping.asciidoc deleted file mode 100644 index c8006346d4e8f..0000000000000 --- a/x-pack/docs/en/rest-api/security/role-mapping.asciidoc +++ /dev/null @@ -1,404 +0,0 @@ -[role="xpack"] -[[security-api-role-mapping]] -=== Role Mapping APIs - -The Role Mapping API enables you to add, remove, and retrieve role mappings. - -==== Request - -`GET /_xpack/security/role_mapping` + - -`GET /_xpack/security/role_mapping/` + - -`DELETE /_xpack/security/role_mapping/` + - -`POST /_xpack/security/role_mapping/` + - -`PUT /_xpack/security/role_mapping/` - -==== Description - -Role mappings have _rules_ that identify users and a list of _roles_ that are -granted to those users. - -NOTE: This API does not create roles. Rather, it maps users to existing roles. -Roles can be created by using <> or -{xpack-ref}/defining-roles.html#roles-management-file[roles files]. - -The role mapping rule is a logical condition that is expressed using a JSON DSL. -The DSL supports the following rule types: - -|======================= -| Type | Value Type (child) | Description - -| `any` | An array of rules | If *any* of its children are true, it - evaluates to `true`. -| `all` | An array of rules | If *all* of its children are true, it - evaluates to `true`. -| `field` | An object | See <> -| `except` | A single rule as an object | Only valid as a child of an `all` - rule. If its child is `false`, the - `except` is `true`. -|======================= - -[float] -[[mapping-roles-rule-field]] -===== The Field Rule - -The `field` rule is the primary building block for a role-mapping expression. -It takes a single object as its value and that object must contain a single -member with key _F_ and value _V_. The field rule looks up the value of _F_ -within the user object and then tests whether the user value _matches_ the -provided value _V_. - -The value specified in the field rule can be one of the following types: -[cols="2,5,3m"] -|======================= -| Type | Description | Example - -| Simple String | Exactly matches the provided value. | "esadmin" -| Wildcard String | Matches the provided value using a wildcard. | "*,dc=example,dc=com" -| Regular Expression | Matches the provided value using a - {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp]. | "/.\*-admin[0-9]*/" -| Number | Matches an equivalent numerical value. | 7 -| Null | Matches a null or missing value. | null -| Array | Tests each element in the array in - accordance with the above definitions. - If _any_ of elements match, the match is successful. | ["admin", "operator"] -|======================= - -===== User Fields - -The _user object_ against which rules are evaluated has the following fields: -[cols="1s,,,m"] -|======================= -| Name | Type | Description | Example - -| username | string | The username by which {security} knows this user. | `"username": "jsmith"` -| dn | string | The _Distinguished Name_ of the user. | `"dn": "cn=jsmith,ou=users,dc=example,dc=com",` -| groups | array-of-string | The groups to which the user belongs. | `"groups" : [ "cn=admin,ou=groups,dc=example,dc=com", -"cn=esusers,ou=groups,dc=example,dc=com ]` -| metadata | object | Additional metadata for the user. | `"metadata": { "cn": "John Smith" }` -| realm | object | The realm that authenticated the user. The only field in this object is the realm name. 
| `"realm": { "name": "ldap1" }` -|======================= - -The `groups` field is multi-valued; a user can belong to many groups. When a -`field` rule is applied against a multi-valued field, it is considered to match -if _at least one_ of the member values matches. For example, the following rule -matches any user who is a member of the `admin` group, regardless of any -other groups they belong to: - -[source, js] ------------------------------------------------------------- -{ "field" : { "groups" : "admin" } } ------------------------------------------------------------- -// NOTCONSOLE - -For additional realm-specific details, see -{xpack-ref}/mapping-roles.html#ldap-role-mapping[Mapping Users and Groups to Roles]. - - -==== Path Parameters - -`name`:: - (string) The distinct name that identifies the role mapping. The name is - used solely as an identifier to facilitate interaction via the API; it does - not affect the behavior of the mapping in any way. If you do not specify this - parameter for the Get Role Mappings API, it returns information about all - role mappings. - - -==== Request Body - -The following parameters can be specified in the body of a PUT or POST request -and pertain to adding a role mapping: - -`enabled` (required):: -(boolean) Mappings that have `enabled` set to `false` are ignored when role -mapping is performed. - -`metadata`:: -(object) Additional metadata that helps define which roles are assigned to each -user. Within the `metadata` object, keys beginning with `_` are reserved for -system usage. - -`roles` (required):: -(list) A list of roles that are granted to the users that match the role-mapping -rules. - -`rules` (required):: -(object) The rules that determine which users should be matched by the mapping. -A rule is a logical condition that is expressed by using a JSON DSL. - - -==== Authorization - -To use this API, you must have at least the `manage_security` cluster privilege. - - -==== Examples - -[[security-api-put-role-mapping]] -To add a role mapping, submit a PUT or POST request to the `/_xpack/security/role_mapping/` endpoint. The following example assigns -the "user" role to all users: - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping1 -{ - "roles": [ "user"], - "enabled": true, <1> - "rules": { - "field" : { "username" : "*" } - }, - "metadata" : { <2> - "version" : 1 - } -} ------------------------------------------------------------- -// CONSOLE -<1> Mappings that have `enabled` set to `false` are ignored when role mapping - is performed. -<2> Metadata is optional. - -A successful call returns a JSON structure that shows whether the mapping has -been created or updated. - -[source,js] --------------------------------------------------- -{ - "role_mapping" : { - "created" : true <1> - } -} --------------------------------------------------- -// TESTRESPONSE -<1> When an existing mapping is updated, `created` is set to false. 
- -The following example assigns the "user" and "admin" roles to specific users: - -[source,js] --------------------------------------------------- -POST /_xpack/security/role_mapping/mapping2 -{ - "roles": [ "user", "admin" ], - "enabled": true, - "rules": { - "field" : { "username" : [ "esadmin01", "esadmin02" ] } - } -} --------------------------------------------------- -// CONSOLE - -The following example matches any user where either the username is `esadmin` -or the user is in the `cn=admin,dc=example,dc=com` group: - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping3 -{ - "roles": [ "superuser" ], - "enabled": true, - "rules": { - "any": [ - { - "field": { - "username": "esadmin" - } - }, - { - "field": { - "groups": "cn=admins,dc=example,dc=com" - } - } - ] - } -} ------------------------------------------------------------- -// CONSOLE - -The following example matches users who authenticated against a specific realm: -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping4 -{ - "roles": [ "ldap-user" ], - "enabled": true, - "rules": { - "field" : { "realm.name" : "ldap1" } - } -} ------------------------------------------------------------- -// CONSOLE - -The following example matches users within a specific LDAP sub-tree: - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping5 -{ - "roles": [ "example-user" ], - "enabled": true, - "rules": { - "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } - } -} ------------------------------------------------------------- -// CONSOLE - -The following example matches users within a particular LDAP sub-tree in a -specific realm: - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping6 -{ - "roles": [ "ldap-example-user" ], - "enabled": true, - "rules": { - "all": [ - { "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } }, - { "field" : { "realm.name" : "ldap1" } } - ] - } -} ------------------------------------------------------------- -// CONSOLE - -The rules can be more complex and include wildcard matching. 
For example, the -following mapping matches any user where *all* of these conditions are met: - -- the _Distinguished Name_ matches the pattern `*,ou=admin,dc=example,dc=com`, - or the username is `es-admin`, or the username is `es-system` -- the user in in the `cn=people,dc=example,dc=com` group -- the user does not have a `terminated_date` - - -[source, js] ------------------------------------------------------------- -POST /_xpack/security/role_mapping/mapping7 -{ - "roles": [ "superuser" ], - "enabled": true, - "rules": { - "all": [ - { - "any": [ - { - "field": { - "dn": "*,ou=admin,dc=example,dc=com" - } - }, - { - "field": { - "username": [ "es-admin", "es-system" ] - } - } - ] - }, - { - "field": { - "groups": "cn=people,dc=example,dc=com" - } - }, - { - "except": { - "field": { - "metadata.terminated_date": null - } - } - } - ] - } -} ------------------------------------------------------------- -// CONSOLE - -[[security-api-get-role-mapping]] -To retrieve a role mapping, issue a GET request to the -`/_xpack/security/role_mapping/` endpoint: - -[source,js] --------------------------------------------------- -GET /_xpack/security/role_mapping/mapping7 --------------------------------------------------- -// CONSOLE -// TEST[continued] - -A successful call retrieves an object, where the keys are the -names of the request mappings, and the values are -the JSON representation of those mappings. -If there is no mapping with the requested name, the -response will have status code `404`. - -[source,js] --------------------------------------------------- -{ - "mapping7": { - "enabled": true, - "roles": [ - "superuser" - ], - "rules": { - "all": [ - { - "any": [ - { - "field": { - "dn": "*,ou=admin,dc=example,dc=com" - } - }, - { - "field": { - "username": [ - "es-admin", - "es-system" - ] - } - } - ] - }, - { - "field": { - "groups": "cn=people,dc=example,dc=com" - } - }, - { - "except": { - "field": { - "metadata.terminated_date": null - } - } - } - ] - }, - "metadata": {} - } -} --------------------------------------------------- -// TESTRESPONSE - -You can specify multiple mapping names as a comma-separated list. -To retrieve all mappings, omit the name entirely. - -[[security-api-delete-role-mapping]] -To delete a role mapping, submit a DELETE request to the -`/_xpack/security/role_mapping/` endpoint: - -[source,js] --------------------------------------------------- -DELETE /_xpack/security/role_mapping/mapping1 --------------------------------------------------- -// CONSOLE -// TEST[setup:role_mapping] - -If the mapping is successfully deleted, the request returns `{"found": true}`. -Otherwise, `found` is set to false. - -[source,js] --------------------------------------------------- -{ - "found" : true -} --------------------------------------------------- -// TESTRESPONSE diff --git a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc index 6298bb8ef9f54..ba554eb8595dd 100644 --- a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc @@ -173,7 +173,7 @@ represent user roles for different systems in the organization. The `active_directory` realm enables you to map Active Directory users to roles via their Active Directory groups or other metadata. 
This role mapping can be
-configured via the <> or by using
+configured via the <> or by using a
file stored on each node. When a user authenticates against an Active
Directory realm, the privileges for that user are the union of all privileges
defined by the roles to which the user is mapped.
diff --git a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc
index 683da76bb7b99..fbf823dae7060 100644
--- a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc
@@ -55,18 +55,23 @@ cluster.
+
--
The `users` file stores all the users and their passwords. Each line in the file
-represents a single user entry consisting of the username and **hashed** password.
+represents a single user entry consisting of the username and **hashed** and **salted** password.

[source,bash]
----------------------------------------------------------------------
rdeniro:$2a$10$BBJ/ILiyJ1eBTYoRKxkqbuDEdYECplvxnqQ47uiowE7yGqvCEgj9W
alpacino:$2a$10$cNwHnElYiMYZ/T3K4PvzGeJ1KbpXZp2PfoQD.gfaVdImnHOwIuBKS
-jacknich:$2a$10$GYUNWyABV/Ols/.bcwxuBuuaQzV6WIauW6RdboojxcixBq3LtI3ni
+jacknich:{PBKDF2}50000$z1CLJt0MEFjkIK5iEfgvfnA6xq7lF25uasspsTKSo5Q=$XxCVLbaKDimOdyWgLCLJiyoiWpA/XDMe/xtVgn1r5Sg=
----------------------------------------------------------------------

-{security} uses `bcrypt` to hash the user passwords.
+NOTE: To limit exposure to credential theft and mitigate credential compromise,
+the file realm stores passwords and caches user credentials according to
+security best practices. By default, a hashed version of user credentials
+is stored in memory, using a salted `sha-256` hash algorithm, and a hashed
+version of passwords is stored on disk, salted and hashed with the `bcrypt`
+hash algorithm. To use different hash algorithms, see <>.

-While it is possible to modify this files directly using any standard text
+While it is possible to modify the `users` file directly using any standard text
editor, we strongly recommend using the <> tool to
apply the required changes.

@@ -103,4 +108,4 @@ By default, {security} checks these files for changes every 5 seconds. You can
change this default behavior by changing the `resource.reload.interval.high`
setting in the `elasticsearch.yml` file (as this is a common setting in {es},
changing its value may affect other schedules in the system).
---
\ No newline at end of file
+--
diff --git a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc
index e32c9eb5300b3..d3572ae5e1b92 100644
--- a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc
@@ -133,7 +133,7 @@ supports both failover and load balancing modes of operation. See
--
The `ldap` realm enables you to map LDAP users to roles via their LDAP
groups, or other metadata. This role mapping can be configured via the
-{ref}/security-api-role-mapping.html[role-mapping API] or by using a file stored
+{ref}/security-api-put-role-mapping.html[add role mapping API] or by using a file stored
on each node. When a user authenticates with LDAP, the privileges for that
user are the union of all privileges defined by the roles to which the
user is mapped.
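To make the file-based alternative concrete, here is a minimal sketch of a
role-mapping file entry; the role name and group DN below are illustrative
assumptions, not values taken from this change:

[source, yaml]
------------------------------------------------------------
# role_mapping.yml (hypothetical example): users in this LDAP group
# are granted the `superuser` role.
superuser:
  - "cn=admins,dc=example,dc=com"
------------------------------------------------------------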
diff --git a/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc
index 3cda29c2c711f..e9fb9cd0eb8a0 100644
--- a/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc
@@ -34,6 +34,13 @@ xpack:
          type: native
          order: 0
------------------------------------------------------------
+
+NOTE: To limit exposure to credential theft and mitigate credential compromise,
+the native realm stores passwords and caches user credentials according to
+security best practices. By default, a hashed version of user credentials
+is stored in memory, using a salted `sha-256` hash algorithm, and a hashed
+version of passwords is stored on disk, salted and hashed with the `bcrypt`
+hash algorithm. To use different hash algorithms, see <>.
--

. Restart {es}.
diff --git a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc
index f66a82b06641e..acaa8429d07f7 100644
--- a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc
@@ -126,7 +126,7 @@ The `certificate_authorities` option can be used as an alternative to the
+
--
You map roles for PKI users through the
-<> or by using a file stored on
+<> or by using a file stored on
each node. When a user authenticates against a PKI realm, the privileges for
that user are the union of all privileges defined by the roles to which the
user is mapped.
diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc
index 7139f4f81987d..4facceff81cde 100644
--- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc
+++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc
@@ -76,12 +76,13 @@ binding.

There are five configuration steps to enable SAML authentication in {es}:

-. Enable SSL/TLS for HTTP
-. Enable the Token Service
-. Create one or more SAML realms
-. Configure role mappings
+. <>
+. <>
+. <>
+. <>
. Generate a SAML Metadata file for use by your Identity Provider _(optional)_

+[[saml-enable-http]]
==== Enable TLS for HTTP

If your {es} cluster is operating in production mode, then you must
@@ -91,6 +92,7 @@ authentication.
For more information, see
{ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications].

+[[saml-enable-token]]
==== Enable the token service

The {es} SAML implementation makes use of the {es} Token Service. This service
@@ -356,6 +358,35 @@ address such as `admin@staff.example.com.attacker.net`.
It is important that you make sure your regular expressions are as precise as
possible so that you do not inadvertently open an avenue for user
impersonation attacks.

+[[req-authn-context]]
+==== Requesting specific authentication methods
+
+It is sometimes necessary for a SAML SP to be able to impose specific
+restrictions regarding the authentication that will take place at an IdP,
+in order to assess the level of confidence that it can place in
+the corresponding authentication response. The restrictions might have to do
+with the authentication method (password, client certificates, etc.), the
+user identification method during registration, and other details. {es} implements
+https://docs.oasis-open.org/security/saml/v2.0/saml-authn-context-2.0-os.pdf[SAML 2.0 Authentication Context], which can be used for this purpose as defined in the SAML 2.0 Core
+Specification.
+
+In short, the SAML SP defines a set of Authentication Context Class Reference
+values, which describe the restrictions to be imposed on the IdP, and sends these
+in the Authentication Request. The IdP attempts to grant these restrictions.
+If it cannot grant them, the authentication attempt fails. If the user is
+successfully authenticated, the Authentication Statement of the SAML Response
+contains an indication of the restrictions that were satisfied.
+
+You can define the Authentication Context Class Reference values by using the
+`req_authn_context_class_ref` option in the SAML realm configuration. See
+{ref}/security-settings.html#ref-saml-settings[SAML realm settings].
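As a minimal sketch of this option (the realm name, its order, and the chosen
class reference value are assumptions for illustration, not part of this
change):

[source, yaml]
------------------------------------------------------------
xpack.security.authc.realms.saml1:
  type: saml
  order: 2
  # Hypothetical: require password-over-TLS authentication at the IdP.
  req_authn_context_class_ref: "urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport"
------------------------------------------------------------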
+{es} supports only the `exact` comparison method for the Authentication Context.
+When it receives the Authentication Response from the IdP, {es} examines the
+value of the Authentication Context Class Reference that is part of the
+Authentication Statement of the SAML Assertion. If it matches one of the
+requested values, the authentication is considered successful. Otherwise, the
+authentication attempt fails.
+
[[saml-logout]]
==== SAML logout

@@ -573,6 +604,7 @@ The passphrase for the keystore, if the file is encrypted. This is a
{ref}/secure-settings.html[secure setting] that must be set with the
`elasticsearch-keystore` tool.

+[[saml-sp-metadata]]
=== Generating SP metadata

Some Identity Providers support importing a metadata file from the Service
@@ -592,9 +624,9 @@ When a user authenticates using SAML, they are identified to the Elastic Stack,
but this does not automatically grant them access to perform any actions or
access any data.

-Your SAML users cannot do anything until they are mapped to X-Pack Security
+Your SAML users cannot do anything until they are mapped to {security}
roles. This mapping is performed through the
-{ref}/security-api-role-mapping.html[role-mapping API]
+{ref}/security-api-put-role-mapping.html[add role mapping API].

This is an example of a simple role mapping that grants the `kibana_user` role
to any user who authenticates against the `saml1` realm:
@@ -626,7 +658,7 @@ mapping are derived from the SAML attributes as follows:
- `metadata`: See <>

For more information, see <> and
-{ref}/security-api-role-mapping.html[Role Mapping APIs].
+{ref}/security-api.html#security-role-mapping-apis[role mapping APIs].

If your IdP has the ability to provide groups or roles to Service Providers,
then you should map this SAML attribute to the `attributes.groups` setting in
diff --git a/x-pack/docs/en/security/authentication/user-cache.asciidoc b/x-pack/docs/en/security/authentication/user-cache.asciidoc
index 36af070bf067b..716e7af99145c 100644
--- a/x-pack/docs/en/security/authentication/user-cache.asciidoc
+++ b/x-pack/docs/en/security/authentication/user-cache.asciidoc
@@ -12,27 +12,8 @@ object to avoid unnecessarily needing to perform role mapping on each request.

The cached user credentials are hashed in memory. By default, {security} uses
a salted `sha-256` hash algorithm. You can use a different hashing algorithm by
-setting the `cache_hash_algo` setting to any of the following:
-
-[[cache-hash-algo]]
-.Cache hash algorithms
-|=======================
-| Algorithm | | | Description
-| `ssha256` | | | Uses a salted `sha-256` algorithm (default).
-| `md5` | | | Uses `MD5` algorithm.
-| `sha1` | | | Uses `SHA1` algorithm.
-| `bcrypt` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds.
-| `bcrypt4` | | | Uses `bcrypt` algorithm with salt generated in 16 rounds.
-| `bcrypt5` | | | Uses `bcrypt` algorithm with salt generated in 32 rounds.
-| `bcrypt6` | | | Uses `bcrypt` algorithm with salt generated in 64 rounds.
-| `bcrypt7` | | | Uses `bcrypt` algorithm with salt generated in 128 rounds.
-| `bcrypt8` | | | Uses `bcrypt` algorithm with salt generated in 256 rounds.
-| `bcrypt9` | | | Uses `bcrypt` algorithm with salt generated in 512 rounds.
-| `noop`,`clear_text` | | | Doesn't hash the credentials and keeps it in clear text in
-                             memory. CAUTION: keeping clear text is considered insecure
-                             and can be compromised at the OS level (for example through
-                             memory dumps and using `ptrace`).
-|=======================
+setting the `cache.hash_algo` realm setting. See
+{ref}/security-settings.html#hashing-settings[User cache and password hash algorithms].

[[cache-eviction-api]]
==== Evicting users from the cache
diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc
index f550c900edce0..7b30284f58365 100644
--- a/x-pack/docs/en/security/authorization/managing-roles.asciidoc
+++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc
@@ -9,7 +9,10 @@ A role is defined by the following JSON structure:
{
  "run_as": [ ... ], <1>
  "cluster": [ ... ], <2>
-  "indices": [ ... ] <3>
+  "global": { ... }, <3>
+  "indices": [ ... ], <4>
+  "applications": [ ... ] <5>
+
}
-----
// NOTCONSOLE
@@ -19,8 +22,15 @@ A role is defined by the following JSON structure:
   cluster level actions users with this role are able to execute. This field
   is optional (missing `cluster` privileges effectively mean no cluster level
   permissions).
-<3> A list of indices permissions entries. This field is optional (missing `indices`
+<3> An object defining global privileges. A global privilege is a form of
+    cluster privilege that is request-sensitive. A standard cluster privilege
+    makes authorization decisions based solely on the action being executed.
+    A global privilege also considers the parameters included in the request.
+    Support for global privileges is currently limited to the management of
+    application privileges. This field is optional.
+<4> A list of indices permissions entries. This field is optional (missing `indices`
    privileges effectively mean no index level permissions).
+<5> A list of application privilege entries. This field is optional.

[[valid-role-name]]
NOTE: Role names must be at least 1 and no more than 1024 characters. They can
@@ -28,6 +38,9 @@ NOTE: Role names must be at least 1 and no more than 1024 characters. They can
punctuation, and printable symbols in the
https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block].
Leading or trailing whitespace is not allowed.

+[[roles-indices-priv]]
+==== Indices Privileges
+
The following describes the structure of an indices permissions entry:

[source,js]
@@ -77,8 +90,60 @@ names or regular expressions that refer to multiple indices.
------------------------------------------------------------------------------
==============================================================================

-The following snippet shows an example definition of a `clicks_admin` role:
+[[roles-global-priv]]
+==== Global Privileges
+The following describes the structure of a global privileges entry:
+
+[source,js]
+-------
+{
+  "application": {
+    "manage": {    <1>
+      "applications": [ ... ]    <2>
+    }
+  }
+}
+-------
+// NOTCONSOLE
+
+<1> The only supported global privilege is the ability to manage application
+    privileges.
+<2> The list of application names that may be managed. This list supports
+    wildcards (e.g. `"myapp-*"`) and regular expressions (e.g.
+    `"/app[0-9]*/"`).
+
+[[roles-application-priv]]
+==== Application Privileges
+The following describes the structure of an application privileges entry:
+
+[source,js]
+-------
+{
+  "application": "my_app", <1>
+  "privileges": [ ... ],   <2>
+  "resources": [ ... ]     <3>
+}
+-------
+// NOTCONSOLE
+
+<1> The name of the application.
+<2> The list of the names of the application privileges to grant to this role.
+<3> The resources to which those privileges apply. These are handled in the same
+    way as index name patterns in `indices` permissions. These resources do not
+    have any special meaning to {security}.
+
+For details about the validation rules for these fields, see the
+{ref}/security-api-put-privileges.html[add application privileges API].
+
+A role may refer to application privileges that do not exist - that is, they
+have not yet been defined through the add application privileges API (or they
+were defined, but have since been deleted). In this case, the privilege has
+no effect, and will not grant any actions in the
+{ref}/security-api-has-privileges.html[has privileges API].
+
+==== Example
+
+The following snippet shows an example definition of a `clicks_admin` role:

[source,js]
-----------
POST /_xpack/security/role/clicks_admin
diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc
index 36f3a1f27f346..ecafe2bd3ec9d 100644
--- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc
+++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc
@@ -28,7 +28,7 @@ you are able to map users to both API-managed roles and file-managed roles
==== Using the role mapping API

You can define role-mappings through the
-{ref}/security-api-role-mapping.html[role mapping API].
+{ref}/security-api-put-role-mapping.html[add role mapping API].

[[mapping-roles-file]]
==== Using role mapping files
diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc
index a13547263a582..47d580491c139 100644
--- a/x-pack/docs/en/security/configuring-es.asciidoc
+++ b/x-pack/docs/en/security/configuring-es.asciidoc
@@ -27,6 +27,9 @@ https://www.elastic.co/subscriptions and
your cluster. If you are using a trial license, the default value is `false`.
For more information, see {ref}/security-settings.html[Security Settings in {es}].

+. If you plan to run {es} in a Federal Information Processing Standard (FIPS)
+140-2 enabled JVM, see <>.
+
. Configure Transport Layer Security (TLS/SSL) for internode-communication.
+
--
@@ -145,5 +148,6 @@ include::authentication/configuring-pki-realm.asciidoc[]
include::authentication/configuring-saml-realm.asciidoc[]
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc
include::authentication/configuring-kerberos-realm.asciidoc[]
+include::fips-140-compliance.asciidoc[]
include::{es-repo-dir}/settings/security-settings.asciidoc[]
include::{es-repo-dir}/settings/audit-settings.asciidoc[]
diff --git a/x-pack/docs/en/security/fips-140-compliance.asciidoc b/x-pack/docs/en/security/fips-140-compliance.asciidoc
new file mode 100644
index 0000000000000..ceb605c2e2db1
--- /dev/null
+++ b/x-pack/docs/en/security/fips-140-compliance.asciidoc
@@ -0,0 +1,128 @@
+[role="xpack"]
+[[fips-140-compliance]]
+=== FIPS 140-2
+
+The Federal Information Processing Standard (FIPS) Publication 140-2, (FIPS PUB
+140-2), titled "Security Requirements for Cryptographic Modules" is a U.S.
+government computer security standard used to approve cryptographic modules.
+{es} offers a FIPS 140-2 compliant mode and as such can run in a FIPS 140-2
+enabled JVM. To run {es} in FIPS mode, you must set
+`xpack.security.fips_mode.enabled` to `true` in `elasticsearch.yml`.
+
+For {es}, FIPS 140-2 compliance is ensured by:
+
+- Using FIPS approved / NIST recommended cryptographic algorithms.
+- Delegating the implementation of these cryptographic algorithms to a NIST
+  validated cryptographic module (available via the Java Security Provider
+  in use in the JVM).
+- Allowing the configuration of {es} in a FIPS 140-2 compliant manner, as
+  documented below.
+
+[float]
+=== Upgrade considerations
+
+If you plan to upgrade your existing cluster to a version that can be run in
+a FIPS 140-2 enabled JVM, the suggested approach is to first perform a rolling
+upgrade to the new version in your existing JVM and perform all necessary
+configuration changes in preparation for running in FIPS mode. You can then
+perform a rolling restart of the nodes, this time starting each node in a FIPS
+140-2 JVM. This allows {es} to take care of a couple of things automatically for you:

+- <> will be upgraded to the latest format version as
+  previous format versions cannot be loaded in a FIPS 140-2 JVM.
+- Self-generated trial licenses will be upgraded to the latest format that
+  is compliant with FIPS 140-2.
+
+If you are on an appropriate license level (Platinum), you can elect to perform
+a rolling upgrade while at the same time running each upgraded node in a
+FIPS 140-2 JVM. In this case, you would also need to regenerate your
+`elasticsearch.keystore` and migrate all secure settings to it, in addition to the
+necessary configuration changes outlined below, before starting each node.
+
+[float]
+=== Configuring {es} for FIPS 140-2
+
+Apart from setting `xpack.security.fips_mode.enabled`, a number of security-related
+settings need to be configured accordingly in order to be compliant
+and able to run {es} successfully in a FIPS 140-2 enabled JVM.
+
+[float]
+==== TLS
+
+SSLv2 and SSLv3 are not allowed by FIPS 140-2, so `SSLv2Hello` and `SSLv3` cannot
+be used for <>
+
+NOTE: The use of TLS ciphers is mainly governed by the relevant crypto module
+(the FIPS Approved Security Provider that your JVM uses). All the ciphers that
+are configured by default in {es} are FIPS 140-2 compliant and as such can be
+used in a FIPS 140-2 JVM (see <>).
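As an illustrative sketch of how the settings discussed in this section come
together in `elasticsearch.yml` (the TLS setting shown and the `pbkdf2` value
are assumptions; verify them against the settings reference for your version):

[source, yaml]
------------------------------------------------------------
# Hypothetical FIPS 140-2 related configuration.
xpack.security.fips_mode.enabled: true

# FIPS 140-2 does not allow SSLv2Hello or SSLv3.
xpack.security.transport.ssl.supported_protocols: [ "TLSv1.2" ]

# Only the PBKDF2 family is FIPS 140-2 compliant for password hashing
# (see the Password Hashing section below).
xpack.security.authc.password_hashing.algorithm: pbkdf2
------------------------------------------------------------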
+
+[float]
+==== TLS Keystores and keys
+
+Keystores can be used in a number of <> in order to
+conveniently store key and trust material. However, neither `JKS` nor `PKCS#12`
+keystores can be used in a FIPS 140-2 enabled JVM, so you must refrain from using
+these keystores. Your FIPS 140-2 provider may provide a compliant keystore that
+can be used, or you can use PEM encoded files. To use PEM encoded key material,
+you can use the relevant `\*.key` and `*.certificate` configuration
+options, and for trust material you can use `*.certificate_authorities`.
+
+
+FIPS 140-2 compliance dictates that the length of the public keys used for TLS
+must correspond to the strength of the symmetric key algorithm in use in TLS.
+Depending on the value of <> that
+you select to use, the TLS keys must have a corresponding length according to
+the following table:
+
+[[comparable-key-strength]]
+.Comparable key strengths
+|=======================
+| Symmetric Key Algorithm | RSA key Length | ECC key length
+| `3DES`                  | 2048           | 224-255
+| `AES-128`               | 3072           | 256-383
+| `AES-256`               | 15360          | 512+
+|=======================
+
+[float]
+==== Password Hashing
+
+{es} offers a number of algorithms for securely hashing credentials in memory and
+on disk. However, only the `PBKDF2` family of algorithms is compliant with FIPS
+140-2 for password hashing. You must set the `cache.hash_algo` realm setting
+and the `xpack.security.authc.password_hashing.algorithm` setting to one of the
+available `PBKDF2` values.
+See <>.
+
+Password hashing configuration changes are not retroactive, so the stored hashed
+credentials of existing users of the file and native realms will not be updated
+on disk.
+Authentication will still work, but in order to ensure FIPS 140-2 compliance,
+you would need to recreate users or change their password using the
+<> CLI tool for the file realm and the
+<> for the native realm.
+
+The user cache will be emptied upon node restart, so any existing hashes using
+non-compliant algorithms will be discarded and the new ones will be created
+using the compliant `PBKDF2` algorithm you have selected.
+
+[float]
+=== Limitations
+
+Due to the limitations that FIPS 140-2 compliance enforces, a small number of
+features are not available while running in FIPS mode. The list is as follows:
+
+* Azure Classic Discovery Plugin
+* Ingest Attachment Plugin
+* The {ref}/certutil.html[`elasticsearch-certutil`] tool. However,
+  `elasticsearch-certutil` can very well be used in a non FIPS 140-2
+  enabled JVM (by pointing the `JAVA_HOME` environment variable to a different
+  Java installation) in order to generate the keys and certificates that
+  can be later used in the FIPS 140-2 enabled JVM.
+* The `elasticsearch-plugin` tool. Accordingly, `elasticsearch-plugin` can be
+  used with a different (non FIPS 140-2 enabled) Java installation if
+  available.
+* The SQL CLI client cannot run in a FIPS 140-2 enabled JVM while using
+  TLS for transport security or PKI for client authentication.
+* The SAML Realm cannot decrypt and consume encrypted Assertions or encrypted
+  attributes in Attribute Statements from the SAML IdP.
\ No newline at end of file
diff --git a/x-pack/docs/en/security/troubleshooting.asciidoc b/x-pack/docs/en/security/troubleshooting.asciidoc
index d1c88b2786f1e..72a05ada29958 100644
--- a/x-pack/docs/en/security/troubleshooting.asciidoc
+++ b/x-pack/docs/en/security/troubleshooting.asciidoc
@@ -15,6 +15,7 @@ answers for frequently asked questions.
* <>
* <>
* <>
+* <>
* <>
* <>

@@ -319,6 +320,77 @@ In this case, you must install the <>.
--
+
+[[trb-security-kerberos]]
+=== Common Kerberos exceptions
+
+*Symptoms:*
+
+* User authentication fails due to either GSS negotiation failure
+or a service login failure (either on the server or in the {es} http client).
+Some of the common exceptions are listed below with some tips to help resolve
+them.
+
+*Resolution:*
+
+`Failure unspecified at GSS-API level (Mechanism level: Checksum failed)`::
++
+--
+
+When you see this error message on the HTTP client side, then it may be
+related to an incorrect password.
+
+When you see this error message in the {es} server logs, then it may be
+related to the {es} service keytab. The keytab file is present but it failed
+to log in as the user. Please check the keytab expiry. Also check whether the
+keytab contains up-to-date credentials; if not, replace them.
+
+You can use tools like `klist` or `ktab` to list principals inside
+the keytab and validate them. You can use `kinit` to see if you can acquire
+initial tickets using the keytab. Please check the tools and their documentation
+in your Kerberos environment.
+
+Kerberos depends on proper hostname resolution, so please check your DNS infrastructure.
+Incorrect DNS setup, DNS SRV records, or configuration for KDC servers in `krb5.conf`
+can cause problems with hostname resolution.
+
+--
+
+`Failure unspecified at GSS-API level (Mechanism level: Request is a replay (34))`::
+
+`Failure unspecified at GSS-API level (Mechanism level: Clock skew too great (37))`::
++
+--
+
+To prevent replay attacks, Kerberos V5 sets a maximum tolerance for computer
+clock synchronization, which is typically 5 minutes. Please check whether
+the time on the machines within the domain is in sync.
+
+--
+
+Kerberos logs are often cryptic, and many things can go wrong because Kerberos
+depends on external services like DNS and NTP. You might
+have to enable additional debug logs to determine the root cause of the issue.
+
+{es} uses a JAAS (Java Authentication and Authorization Service) Kerberos login
+module to provide Kerberos support. To enable debug logs on {es} for the login
+module, use the following Kerberos realm setting:
+[source,yaml]
+----------------
+xpack.security.authc.realms..krb.debug: true
+----------------
+
+For detailed information, see {ref}/security-settings.html#ref-kerberos-settings[Kerberos realm settings].
+
+Sometimes you may need to go deeper to understand the problem during SPNEGO
+GSS context negotiation or look at the Kerberos message exchange. To enable
+Kerberos/SPNEGO debug logging on the JVM, add the following JVM system properties:
+
+`-Dsun.security.krb5.debug=true`
+
+`-Dsun.security.spnego.debug=true`
+
+For more information about JVM system properties, see {ref}/jvm-options.html[configuring JVM options].
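For example, one way to pass these properties for a single run is the
`ES_JAVA_OPTS` environment variable (the path to the start script is an
assumption about your installation layout):

[source,bash]
----------------
# Hypothetical: enable Kerberos/SPNEGO debug output for one run of Elasticsearch.
ES_JAVA_OPTS="-Dsun.security.krb5.debug=true -Dsun.security.spnego.debug=true" \
  ./bin/elasticsearch
----------------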
+ [[trb-security-internalserver]] === Internal Server Error in Kibana diff --git a/x-pack/license-tools/build.gradle b/x-pack/license-tools/build.gradle index 183b9ab50e03b..4bd17713a2fea 100644 --- a/x-pack/license-tools/build.gradle +++ b/x-pack/license-tools/build.gradle @@ -1,7 +1,7 @@ apply plugin: 'elasticsearch.build' dependencies { - compile project(path: xpackModule('core'), configuration: 'shadow') + compile "org.elasticsearch.plugin:x-pack-core:${version}" compile "org.elasticsearch:elasticsearch:${version}" testCompile "org.elasticsearch.test:framework:${version}" } diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle index d066769645b5c..0b1f889a2c121 100644 --- a/x-pack/plugin/ccr/build.gradle +++ b/x-pack/plugin/ccr/build.gradle @@ -47,7 +47,7 @@ gradle.projectsEvaluated { dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/build.gradle index e9e57762c37e2..97d4008eb8c1f 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/build.gradle @@ -3,7 +3,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.standalone-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('ccr'), configuration: 'runtime') } @@ -36,4 +36,5 @@ followClusterTestRunner { finalizedBy 'leaderClusterTestCluster#stop' } +check.dependsOn followClusterTest test.enabled = false diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle index 97c019d4c73a4..897aed0110e17 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle @@ -3,7 +3,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.standalone-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('ccr'), configuration: 'runtime') } @@ -71,4 +71,5 @@ followClusterTestRunner { finalizedBy 'leaderClusterTestCluster#stop' } +check.dependsOn followClusterTest test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle index 537584f7b59f1..cc726e1a65257 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -3,7 +3,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.standalone-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('ccr'), configuration: 'runtime') } @@ -37,4 +37,5 @@ 
followClusterTestRunner { finalizedBy 'leaderClusterTestCluster#stop' } +check.dependsOn followClusterTest test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java index eaded2456306a..b5d6697fc73c2 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java @@ -40,7 +40,7 @@ private CcrStatsAction() { @Override public TasksResponse newResponse() { - return null; + return new TasksResponse(); } public static class TasksResponse extends BaseTasksResponse implements ToXContentObject { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index ff877724ce4b5..677b8955490da 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -280,7 +280,7 @@ private FollowingEngine createEngine(Store store, EngineConfig config) throws IO SequenceNumbers.NO_OPS_PERFORMED, shardId, 1L); store.associateIndexWithNewTranslog(translogUuid); FollowingEngine followingEngine = new FollowingEngine(config); - followingEngine.recoverFromTranslog(); + followingEngine.recoverFromTranslog(Long.MAX_VALUE); return followingEngine; } diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index a3b4bea9702f5..a58500b880f94 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -8,7 +8,6 @@ import java.nio.file.StandardCopyOption apply plugin: 'elasticsearch.esplugin' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' -apply plugin: 'com.github.johnrengelman.shadow' archivesBaseName = 'x-pack-core' @@ -27,19 +26,18 @@ dependencyLicenses { dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" - compile project(':x-pack:protocol') - shadow "org.apache.httpcomponents:httpclient:${versions.httpclient}" - shadow "org.apache.httpcomponents:httpcore:${versions.httpcore}" - shadow "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}" - shadow "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}" + compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" + compile "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}" + compile "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}" - shadow "commons-logging:commons-logging:${versions.commonslogging}" - shadow "commons-codec:commons-codec:${versions.commonscodec}" + compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "commons-codec:commons-codec:${versions.commonscodec}" // security deps - shadow 'com.unboundid:unboundid-ldapsdk:3.2.0' - shadow project(path: ':modules:transport-netty4', configuration: 'runtime') - shadow(project(path: ':plugins:transport-nio', configuration: 'runtime')) { + compile 'com.unboundid:unboundid-ldapsdk:3.2.0' + compile project(path: ':modules:transport-netty4', configuration: 'runtime') + compile(project(path: ':plugins:transport-nio', 
configuration: 'runtime')) { // TODO: core exclusion should not be necessary, since it is a transitive dep of all plugins exclude group: "org.elasticsearch", module: "elasticsearch-core" } @@ -112,8 +110,7 @@ test { // TODO: don't publish test artifacts just to run messy tests, fix the tests! // https://github.com/elastic/x-plugins/issues/724 configurations { - testArtifacts.extendsFrom(testRuntime, shadow) - testArtifacts.exclude(group: project(':x-pack:protocol').group, module: project(':x-pack:protocol').name) + testArtifacts.extendsFrom testRuntime } task testJar(type: Jar) { appendix 'test' diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java new file mode 100644 index 0000000000000..41f066daf93d3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Locale; + +/** + * Fetch information about X-Pack from the cluster. + */ +public class XPackInfoRequest extends ActionRequest { + + public enum Category { + BUILD, LICENSE, FEATURES; + + public static EnumSet toSet(String... 
categories) { + EnumSet set = EnumSet.noneOf(Category.class); + for (String category : categories) { + switch (category) { + case "_all": + return EnumSet.allOf(Category.class); + case "_none": + return EnumSet.noneOf(Category.class); + default: + set.add(Category.valueOf(category.toUpperCase(Locale.ROOT))); + } + } + return set; + } + } + + private boolean verbose; + private EnumSet categories = EnumSet.noneOf(Category.class); + + public XPackInfoRequest() {} + + public void setVerbose(boolean verbose) { + this.verbose = verbose; + } + + public boolean isVerbose() { + return verbose; + } + + public void setCategories(EnumSet categories) { + this.categories = categories; + } + + public EnumSet getCategories() { + return categories; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + this.verbose = in.readBoolean(); + EnumSet categories = EnumSet.noneOf(Category.class); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + categories.add(Category.valueOf(in.readString())); + } + this.categories = categories; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(verbose); + out.writeVInt(categories.size()); + for (Category category : categories) { + out.writeString(category.name()); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java new file mode 100644 index 0000000000000..b51a451a67faa --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java @@ -0,0 +1,483 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.license.LicenseStatus; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class XPackInfoResponse extends ActionResponse implements ToXContentObject { + /** + * Value of the license's expiration time if it should never expire. 
+     */
+    public static final long BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS = Long.MAX_VALUE - TimeUnit.HOURS.toMillis(24 * 365);
+    // TODO move this constant to License.java once we move License.java to the protocol jar
+
+    @Nullable private BuildInfo buildInfo;
+    @Nullable private LicenseInfo licenseInfo;
+    @Nullable private FeatureSetsInfo featureSetsInfo;
+
+    public XPackInfoResponse() {}
+
+    public XPackInfoResponse(@Nullable BuildInfo buildInfo, @Nullable LicenseInfo licenseInfo, @Nullable FeatureSetsInfo featureSetsInfo) {
+        this.buildInfo = buildInfo;
+        this.licenseInfo = licenseInfo;
+        this.featureSetsInfo = featureSetsInfo;
+    }
+
+    /**
+     * @return The build info (incl. build hash and timestamp)
+     */
+    public BuildInfo getBuildInfo() {
+        return buildInfo;
+    }
+
+    /**
+     * @return The current license info (incl. UID, type/mode, status and expiry date). May return {@code null} when no
+     * license is currently installed.
+     */
+    public LicenseInfo getLicenseInfo() {
+        return licenseInfo;
+    }
+
+    /**
+     * @return The current status of the feature sets in X-Pack. Feature sets describe the features available/enabled in X-Pack.
+     */
+    public FeatureSetsInfo getFeatureSetsInfo() {
+        return featureSetsInfo;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeOptionalWriteable(buildInfo);
+        out.writeOptionalWriteable(licenseInfo);
+        out.writeOptionalWriteable(featureSetsInfo);
+    }
+
+    @Override
+    public void readFrom(StreamInput in) throws IOException {
+        this.buildInfo = in.readOptionalWriteable(BuildInfo::new);
+        this.licenseInfo = in.readOptionalWriteable(LicenseInfo::new);
+        this.featureSetsInfo = in.readOptionalWriteable(FeatureSetsInfo::new);
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (other == null || other.getClass() != getClass()) return false;
+        if (this == other) return true;
+        XPackInfoResponse rhs = (XPackInfoResponse) other;
+        return Objects.equals(buildInfo, rhs.buildInfo)
+                && Objects.equals(licenseInfo, rhs.licenseInfo)
+                && Objects.equals(featureSetsInfo, rhs.featureSetsInfo);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(buildInfo, licenseInfo, featureSetsInfo);
+    }
+
+    @Override
+    public String toString() {
+        return Strings.toString(this, true, false);
+    }
+
+    private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(
+            "xpack_info_response", true, (a, v) -> {
+                BuildInfo buildInfo = (BuildInfo) a[0];
+                LicenseInfo licenseInfo = (LicenseInfo) a[1];
+                @SuppressWarnings("unchecked") // This is how constructing object parser works
+                List featureSets = (List) a[2];
+                FeatureSetsInfo featureSetsInfo = featureSets == null ? null : new FeatureSetsInfo(new HashSet<>(featureSets));
+                return new XPackInfoResponse(buildInfo, licenseInfo, featureSetsInfo);
+            });
+    static {
+        PARSER.declareObject(optionalConstructorArg(), BuildInfo.PARSER, new ParseField("build"));
+        /*
+         * licenseInfo is sort of "double optional" because it is
+         * optional but it can also be sent as `null`.
+ */ + PARSER.declareField(optionalConstructorArg(), (p, v) -> { + if (p.currentToken() == XContentParser.Token.VALUE_NULL) { + return null; + } + return LicenseInfo.PARSER.parse(p, v); + }, + new ParseField("license"), ValueType.OBJECT_OR_NULL); + PARSER.declareNamedObjects(optionalConstructorArg(), + (p, c, name) -> FeatureSetsInfo.FeatureSet.PARSER.parse(p, name), + new ParseField("features")); + } + public static XPackInfoResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (buildInfo != null) { + builder.field("build", buildInfo, params); + } + + EnumSet categories = XPackInfoRequest.Category + .toSet(Strings.splitStringByCommaToArray(params.param("categories", "_all"))); + if (licenseInfo != null) { + builder.field("license", licenseInfo, params); + } else if (categories.contains(XPackInfoRequest.Category.LICENSE)) { + // if the user requested the license info, and there is no license, we should send + // back an explicit null value (indicating there is no license). This is different + // than not adding the license info at all + builder.nullField("license"); + } + + if (featureSetsInfo != null) { + builder.field("features", featureSetsInfo, params); + } + + if (params.paramAsBoolean("human", true)) { + builder.field("tagline", "You know, for X"); + } + + return builder.endObject(); + } + + public static class LicenseInfo implements ToXContentObject, Writeable { + private final String uid; + private final String type; + private final String mode; + private final LicenseStatus status; + private final long expiryDate; + + public LicenseInfo(String uid, String type, String mode, LicenseStatus status, long expiryDate) { + this.uid = uid; + this.type = type; + this.mode = mode; + this.status = status; + this.expiryDate = expiryDate; + } + + public LicenseInfo(StreamInput in) throws IOException { + this(in.readString(), in.readString(), in.readString(), LicenseStatus.readFrom(in), in.readLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(uid); + out.writeString(type); + out.writeString(mode); + status.writeTo(out); + out.writeLong(expiryDate); + } + + public String getUid() { + return uid; + } + + public String getType() { + return type; + } + + public String getMode() { + return mode; + } + + public long getExpiryDate() { + return expiryDate; + } + + public LicenseStatus getStatus() { + return status; + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + LicenseInfo rhs = (LicenseInfo) other; + return Objects.equals(uid, rhs.uid) + && Objects.equals(type, rhs.type) + && Objects.equals(mode, rhs.mode) + && Objects.equals(status, rhs.status) + && expiryDate == rhs.expiryDate; + } + + @Override + public int hashCode() { + return Objects.hash(uid, type, mode, status, expiryDate); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "license_info", true, (a, v) -> { + String uid = (String) a[0]; + String type = (String) a[1]; + String mode = (String) a[2]; + LicenseStatus status = LicenseStatus.fromString((String) a[3]); + Long expiryDate = (Long) a[4]; + long primitiveExpiryDate = expiryDate == null ? 
BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS : expiryDate; + return new LicenseInfo(uid, type, mode, status, primitiveExpiryDate); + }); + static { + PARSER.declareString(constructorArg(), new ParseField("uid")); + PARSER.declareString(constructorArg(), new ParseField("type")); + PARSER.declareString(constructorArg(), new ParseField("mode")); + PARSER.declareString(constructorArg(), new ParseField("status")); + PARSER.declareLong(optionalConstructorArg(), new ParseField("expiry_date_in_millis")); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject() + .field("uid", uid) + .field("type", type) + .field("mode", mode) + .field("status", status.label()); + if (expiryDate != BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS) { + builder.timeField("expiry_date_in_millis", "expiry_date", expiryDate); + } + return builder.endObject(); + } + } + + public static class BuildInfo implements ToXContentObject, Writeable { + private final String hash; + private final String timestamp; + + public BuildInfo(String hash, String timestamp) { + this.hash = hash; + this.timestamp = timestamp; + } + + public BuildInfo(StreamInput input) throws IOException { + this(input.readString(), input.readString()); + } + + @Override + public void writeTo(StreamOutput output) throws IOException { + output.writeString(hash); + output.writeString(timestamp); + } + + public String getHash() { + return hash; + } + + public String getTimestamp() { + return timestamp; + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + BuildInfo rhs = (BuildInfo) other; + return Objects.equals(hash, rhs.hash) + && Objects.equals(timestamp, rhs.timestamp); + } + + @Override + public int hashCode() { + return Objects.hash(hash, timestamp); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "build_info", true, (a, v) -> new BuildInfo((String) a[0], (String) a[1])); + static { + PARSER.declareString(constructorArg(), new ParseField("hash")); + PARSER.declareString(constructorArg(), new ParseField("date")); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("hash", hash) + .field("date", timestamp) + .endObject(); + } + } + + public static class FeatureSetsInfo implements ToXContentObject, Writeable { + private final Map featureSets; + + public FeatureSetsInfo(Set featureSets) { + Map map = new HashMap<>(featureSets.size()); + for (FeatureSet featureSet : featureSets) { + map.put(featureSet.name, featureSet); + } + this.featureSets = Collections.unmodifiableMap(map); + } + + public FeatureSetsInfo(StreamInput in) throws IOException { + int size = in.readVInt(); + Map featureSets = new HashMap<>(size); + for (int i = 0; i < size; i++) { + FeatureSet featureSet = new FeatureSet(in); + featureSets.put(featureSet.name, featureSet); + } + this.featureSets = Collections.unmodifiableMap(featureSets); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(featureSets.size()); + for (FeatureSet featureSet : featureSets.values()) { + featureSet.writeTo(out); + } + } + + public Map getFeatureSets() { + return featureSets; + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + 
FeatureSetsInfo rhs = (FeatureSetsInfo) other; + return Objects.equals(featureSets, rhs.featureSets); + } + + @Override + public int hashCode() { + return Objects.hash(featureSets); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + List names = new ArrayList<>(this.featureSets.keySet()).stream().sorted().collect(Collectors.toList()); + for (String name : names) { + builder.field(name, featureSets.get(name), params); + } + return builder.endObject(); + } + + public static class FeatureSet implements ToXContentObject, Writeable { + private final String name; + @Nullable private final String description; + private final boolean available; + private final boolean enabled; + @Nullable private final Map nativeCodeInfo; + + public FeatureSet(String name, @Nullable String description, boolean available, boolean enabled, + @Nullable Map nativeCodeInfo) { + this.name = name; + this.description = description; + this.available = available; + this.enabled = enabled; + this.nativeCodeInfo = nativeCodeInfo; + } + + public FeatureSet(StreamInput in) throws IOException { + this(in.readString(), in.readOptionalString(), in.readBoolean(), in.readBoolean(), in.readMap()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeOptionalString(description); + out.writeBoolean(available); + out.writeBoolean(enabled); + out.writeMap(nativeCodeInfo); + } + + public String name() { + return name; + } + + @Nullable + public String description() { + return description; + } + + public boolean available() { + return available; + } + + public boolean enabled() { + return enabled; + } + + @Nullable + public Map nativeCodeInfo() { + return nativeCodeInfo; + } + + @Override + public boolean equals(Object other) { + if (other == null || other.getClass() != getClass()) return false; + if (this == other) return true; + FeatureSet rhs = (FeatureSet) other; + return Objects.equals(name, rhs.name) + && Objects.equals(description, rhs.description) + && available == rhs.available + && enabled == rhs.enabled + && Objects.equals(nativeCodeInfo, rhs.nativeCodeInfo); + } + + @Override + public int hashCode() { + return Objects.hash(name, description, available, enabled, nativeCodeInfo); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "feature_set", true, (a, name) -> { + String description = (String) a[0]; + boolean available = (Boolean) a[1]; + boolean enabled = (Boolean) a[2]; + @SuppressWarnings("unchecked") // Matches up with declaration below + Map nativeCodeInfo = (Map) a[3]; + return new FeatureSet(name, description, available, enabled, nativeCodeInfo); + }); + static { + PARSER.declareString(optionalConstructorArg(), new ParseField("description")); + PARSER.declareBoolean(constructorArg(), new ParseField("available")); + PARSER.declareBoolean(constructorArg(), new ParseField("enabled")); + PARSER.declareObject(optionalConstructorArg(), (p, name) -> p.map(), new ParseField("native_code_info")); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (description != null) { + builder.field("description", description); + } + builder.field("available", available); + builder.field("enabled", enabled); + if (nativeCodeInfo != null) { + builder.field("native_code_info", nativeCodeInfo); + } + return builder.endObject(); + } + } + } +} diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java new file mode 100644 index 0000000000000..83621a9ac3d41 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeRequest; + +public class XPackUsageRequest extends MasterNodeRequest { + + @Override + public ActionRequestValidationException validate() { + return null; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageResponse.java new file mode 100644 index 0000000000000..ccf681837fdcd --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageResponse.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Response object from calling the xpack usage api. + * + * Usage information for each feature is accessible through {@link #getUsages()}. + */ +public class XPackUsageResponse { + + private final Map> usages; + + private XPackUsageResponse(Map> usages) throws IOException { + this.usages = usages; + } + + @SuppressWarnings("unchecked") + private static Map castMap(Object value) { + return (Map)value; + } + + /** Return a map from feature name to usage information for that feature. */ + public Map> getUsages() { + return usages; + } + + public static XPackUsageResponse fromXContent(XContentParser parser) throws IOException { + Map rawMap = parser.map(); + Map> usages = rawMap.entrySet().stream().collect( + Collectors.toMap(Map.Entry::getKey, e -> castMap(e.getValue()))); + return new XPackUsageResponse(usages); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/common/ProtocolUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/common/ProtocolUtils.java new file mode 100644 index 0000000000000..3934095512120 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/common/ProtocolUtils.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.common; + +import java.util.Arrays; +import java.util.Map; + +/** + * Common utilities used for XPack protocol classes + */ +public final class ProtocolUtils { + + /** + * Implements equals for a map of string arrays + * + * The map of string arrays is used in some XPack protocol classes but doesn't work with the built-in + * equals, which compares arrays by reference rather than by content. + */ + public static boolean equals(Map<String, String[]> a, Map<String, String[]> b) { + if (a == null) { + return b == null; + } + if (b == null) { + return false; + } + if (a.size() != b.size()) { + return false; + } + for (Map.Entry<String, String[]> entry : a.entrySet()) { + String[] val = entry.getValue(); + String key = entry.getKey(); + if (val == null) { + if (b.get(key) != null || b.containsKey(key) == false) { + return false; + } + } else { + if (Arrays.equals(val, b.get(key)) == false) { + return false; + } + } + } + return true; + } + + /** + * Implements hashCode for a map of string arrays + * + * A map of string arrays doesn't work with the built-in hashCode, which ignores array contents. + */ + public static int hashCode(Map<String, String[]> a) { + int hash = 0; + for (Map.Entry<String, String[]> entry : a.entrySet()) { + hash += Arrays.hashCode(entry.getValue()); + } + return hash; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Connection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java similarity index 51% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Connection.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java index f3d9289644918..994c7e2c2d5a3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Connection.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java @@ -3,18 +3,25 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import com.carrotsearch.hppc.ObjectIntHashMap; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContent.Params; -import org.elasticsearch.xpack.core.graph.action.Vertex.VertexId; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; import java.io.IOException; +import java.util.List; import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; /** * A Connection links exactly two {@link Vertex} objects. The basis of a @@ -23,10 +30,10 @@ * as a weight. 
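To see why these helpers exist: `java.util.Map#equals` compares `String[]` values by reference, so two maps with identical contents compare unequal. A minimal sketch of the problem they solve (the map contents are hypothetical):

```java
import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.protocol.xpack.common.ProtocolUtils;

public class ProtocolUtilsDemo {
    public static void main(String[] args) {
        Map<String, String[]> a = new HashMap<>();
        Map<String, String[]> b = new HashMap<>();
        a.put("watcher", new String[] {"all good", "no warnings"});
        b.put("watcher", new String[] {"all good", "no warnings"});

        // HashMap.equals delegates to String[].equals, which is identity-based,
        // so logically equal maps still compare as different.
        System.out.println(a.equals(b));                // false
        // ProtocolUtils compares the array *contents* instead.
        System.out.println(ProtocolUtils.equals(a, b)); // true
    }
}
```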
*/ public class Connection { - Vertex from; - Vertex to; - double weight; - long docCount; + private Vertex from; + private Vertex to; + private double weight; + private long docCount; public Connection(Vertex from, Vertex to, double weight, long docCount) { this.from = from; @@ -35,7 +42,7 @@ public Connection(Vertex from, Vertex to, double weight, long docCount) { this.docCount = docCount; } - void readFrom(StreamInput in, Map<VertexId, Vertex> vertices) throws IOException { + public Connection(StreamInput in, Map<VertexId, Vertex> vertices) throws IOException { from = vertices.get(new VertexId(in.readString(), in.readString())); to = vertices.get(new VertexId(in.readString(), in.readString())); weight = in.readDouble(); @@ -80,13 +87,81 @@ public double getWeight() { public long getDocCount() { return docCount; } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Connection other = (Connection) obj; + return docCount == other.docCount && + weight == other.weight && + Objects.equals(to, other.to) && + Objects.equals(from, other.from); + } + + @Override + public int hashCode() { + return Objects.hash(docCount, weight, from, to); + } + + + private static final ParseField SOURCE = new ParseField("source"); + private static final ParseField TARGET = new ParseField("target"); + private static final ParseField WEIGHT = new ParseField("weight"); + private static final ParseField DOC_COUNT = new ParseField("doc_count"); + void toXContent(XContentBuilder builder, Params params, ObjectIntHashMap<Vertex> vertexNumbers) throws IOException { - builder.field("source", vertexNumbers.get(from)); - builder.field("target", vertexNumbers.get(to)); - builder.field("weight", weight); - builder.field("doc_count", docCount); + builder.field(SOURCE.getPreferredName(), vertexNumbers.get(from)); + builder.field(TARGET.getPreferredName(), vertexNumbers.get(to)); + builder.field(WEIGHT.getPreferredName(), weight); + builder.field(DOC_COUNT.getPreferredName(), docCount); + } + + // When deserializing from XContent we need to wait for all vertices to be loaded before + // Connection objects can be created that reference them. This class provides the interim + // state for connections. 
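The `UnresolvedConnection` class defined just below makes this two-phase parse concrete: connections are first read as vertex *positions*, then swapped for the real `Vertex` objects. A hedged sketch of the resolution step (the sample vertices are hypothetical, and same-package access is assumed since the class is package-private):

```java
package org.elasticsearch.protocol.xpack.graph;

import java.util.Arrays;
import java.util.List;

class UnresolvedConnectionDemo {
    static Connection demo() {
        // Vertices are parsed first, in array order.
        List<Vertex> vertices = Arrays.asList(
                new Vertex("field", "java", 1.0, 0, 10, 5),
                new Vertex("field", "lucene", 0.9, 1, 20, 7));
        // A connection arrives as a pair of vertex positions (0 -> 1 here)
        // plus its weight and doc_count...
        Connection.UnresolvedConnection parsed =
                new Connection.UnresolvedConnection(0, 1, 0.5, 3);
        // ...and only becomes a real Connection once all vertices exist.
        return parsed.resolve(vertices);
    }
}
```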
+ static class UnresolvedConnection { + int fromIndex; + int toIndex; + double weight; + long docCount; + UnresolvedConnection(int fromIndex, int toIndex, double weight, long docCount) { + super(); + this.fromIndex = fromIndex; + this.toIndex = toIndex; + this.weight = weight; + this.docCount = docCount; + } + public Connection resolve(List<Vertex> vertices) { + return new Connection(vertices.get(fromIndex), vertices.get(toIndex), weight, docCount); + } + + private static final ConstructingObjectParser<UnresolvedConnection, Void> PARSER = new ConstructingObjectParser<>( + "ConnectionParser", true, + args -> { + int source = (Integer) args[0]; + int target = (Integer) args[1]; + double weight = (Double) args[2]; + long docCount = (Long) args[3]; + return new UnresolvedConnection(source, target, weight, docCount); + }); + + static { + PARSER.declareInt(constructorArg(), SOURCE); + PARSER.declareInt(constructorArg(), TARGET); + PARSER.declareDouble(constructorArg(), WEIGHT); + PARSER.declareLong(constructorArg(), DOC_COUNT); + } + static UnresolvedConnection fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } + /** * An identifier (implements hashcode and equals) that represents a diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java similarity index 72% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequest.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java index e44f9f7603752..196982c0a35fb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -14,6 +14,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; @@ -29,7 +31,7 @@ * Holds the criteria required to guide the exploration of connected terms which * can be returned as a graph. */ -public class GraphExploreRequest extends ActionRequest implements IndicesRequest.Replaceable { +public class GraphExploreRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContentObject { public static final String NO_HOPS_ERROR_MESSAGE = "Graph explore request must have at least one hop"; public static final String NO_VERTICES_ERROR_MESSAGE = "Graph explore hop must have at least one VertexRequest"; @@ -51,8 +53,8 @@ public GraphExploreRequest() { } /** - * Constructs a new graph request to run against the provided - * indices. No indices means it will run against all indices. 
+ * Constructs a new graph request to run against the provided indices. No + * indices means it will run against all indices. */ public GraphExploreRequest(String... indices) { this.indices = indices; @@ -75,7 +77,6 @@ public String[] indices() { return this.indices; } - @Override public GraphExploreRequest indices(String... indices) { this.indices = indices; @@ -123,10 +124,14 @@ public TimeValue timeout() { } /** - * Graph exploration can be set to timeout after the given period. Search operations involved in - * each hop are limited to the remaining time available but can still overrun due to the nature - * of their "best efforts" timeout support. When a timeout occurs partial results are returned. - * @param timeout a {@link TimeValue} object which determines the maximum length of time to spend exploring + * Graph exploration can be set to timeout after the given period. Search + * operations involved in each hop are limited to the remaining time + * available but can still overrun due to the nature of their "best efforts" + * timeout support. When a timeout occurs partial results are returned. + * + * @param timeout + * a {@link TimeValue} object which determines the maximum length + * of time to spend exploring */ public GraphExploreRequest timeout(TimeValue timeout) { if (timeout == null) { @@ -153,10 +158,10 @@ public void readFrom(StreamInput in) throws IOException { sampleSize = in.readInt(); sampleDiversityField = in.readOptionalString(); maxDocsPerDiversityValue = in.readInt(); - + useSignificance = in.readBoolean(); returnDetailedInfo = in.readBoolean(); - + int numHops = in.readInt(); Hop parentHop = null; for (int i = 0; i < numHops; i++) { @@ -180,7 +185,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(sampleSize); out.writeOptionalString(sampleDiversityField); out.writeInt(maxDocsPerDiversityValue); - + out.writeBoolean(useSignificance); out.writeBoolean(returnDetailedInfo); out.writeInt(hops.size()); @@ -196,18 +201,21 @@ public String toString() { } /** - * The number of top-matching documents that are considered during each hop (default is - * {@link SamplerAggregationBuilder#DEFAULT_SHARD_SAMPLE_SIZE} - * Very small values (less than 50) may not provide sufficient weight-of-evidence to identify - * significant connections between terms. - *
<p>
Very large values (many thousands) are not recommended with loosely defined queries (fuzzy queries or those - * with many OR clauses). - * This is because any useful signals in the best documents are diluted with irrelevant noise from low-quality matches. - * Performance is also typically better with smaller samples as there are less look-ups required for background frequencies - * of terms found in the documents + * The number of top-matching documents that are considered during each hop + * (default is {@link SamplerAggregationBuilder#DEFAULT_SHARD_SAMPLE_SIZE}). + * Very small values (less than 50) may not provide sufficient + * weight-of-evidence to identify significant connections between terms. + *
<p>
+ * Very large values (many thousands) are not recommended with loosely + * defined queries (fuzzy queries or those with many OR clauses). This is + * because any useful signals in the best documents are diluted with + * irrelevant noise from low-quality matches. Performance is also typically + * better with smaller samples as there are fewer look-ups required for + * background frequencies of terms found in the documents. + *
<p>
* - * @param maxNumberOfDocsPerHop shard-level sample size in documents + * @param maxNumberOfDocsPerHop + * shard-level sample size in documents */ public void sampleSize(int maxNumberOfDocsPerHop) { sampleSize = maxNumberOfDocsPerHop; @@ -242,10 +250,13 @@ public int maxDocsPerDiversityValue() { } /** - * Controls the choice of algorithm used to select interesting terms. The default - * value is true which means terms are selected based on significance (see the {@link SignificantTerms} - * aggregation) rather than popularity (using the {@link TermsAggregator}). - * @param value true if the significant_terms algorithm should be used. + * Controls the choice of algorithm used to select interesting terms. The + * default value is true, which means terms are selected based on + * significance (see the {@link SignificantTerms} aggregation) rather than + * popularity (using the {@link TermsAggregator}). + * + * @param value + * true if the significant_terms algorithm should be used. */ public void useSignificance(boolean value) { this.useSignificance = value; @@ -254,32 +265,37 @@ public void useSignificance(boolean value) { public boolean useSignificance() { return useSignificance; } - + /** - * Return detailed information about vertex frequencies as part of JSON results - defaults to false - * @param value true if detailed information is required in JSON responses + * Return detailed information about vertex frequencies as part of JSON + * results - defaults to false + * + * @param value + * true if detailed information is required in JSON responses */ public void returnDetailedInfo(boolean value) { this.returnDetailedInfo = value; - } + } public boolean returnDetailedInfo() { return returnDetailedInfo; } - /** - * Add a stage in the graph exploration. Each hop represents a stage of - * querying elasticsearch to identify terms which can then be connnected - * to other terms in a subsequent hop. - * @param guidingQuery optional choice of query which influences which documents - * are considered in this stage - * @return a {@link Hop} object that holds settings for a stage in the graph exploration + /** + * Add a stage in the graph exploration. Each hop represents a stage of + * querying Elasticsearch to identify terms which can then be connected to + * other terms in a subsequent hop. 
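To ground the hop-chaining described here, a short sketch of assembling a two-hop exploration (index and field names are hypothetical; `Hop#addVertexRequest` is part of the existing class, outside this hunk):

```java
GraphExploreRequest request = new GraphExploreRequest("people");
request.useSignificance(true);

// First hop: seed the exploration with a guiding query.
Hop hop1 = request.createNextHop(QueryBuilders.termQuery("interest", "search"));
hop1.addVertexRequest("name").size(10);

// Second hop: expand from the terms found in hop 1; createNextHop
// wires up the parent/child relationship between the hops itself.
Hop hop2 = request.createNextHop(null);
hop2.addVertexRequest("employer").size(5);
```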
+ * + * @param guidingQuery + * optional choice of query which influences which documents are + * considered in this stage + * @return a {@link Hop} object that holds settings for a stage in the graph + * exploration */ public Hop createNextHop(QueryBuilder guidingQuery) { Hop parent = null; if (hops.size() > 0) { - parent = hops.get(hops.size() - 1); + parent = hops.get(hops.size() - 1); } Hop newHop = new Hop(parent); newHop.guidingQuery = guidingQuery; @@ -330,6 +346,43 @@ void writeTo(StreamOutput out) throws IOException { } } - + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.startObject("controls"); + { + if (sampleSize != SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE) { + builder.field("sample_size", sampleSize); + } + if (sampleDiversityField != null) { + builder.startObject("sample_diversity"); + builder.field("field", sampleDiversityField); + builder.field("max_docs_per_value", maxDocsPerDiversityValue); + builder.endObject(); + } + builder.field("use_significance", useSignificance); + if (returnDetailedInfo) { + builder.field("return_detailed_stats", returnDetailedInfo); + } + } + builder.endObject(); + + for (Hop hop : hops) { + if (hop.parentHop != null) { + builder.startObject("connections"); + } + hop.toXContent(builder, params); + } + for (Hop hop : hops) { + if (hop.parentHop != null) { + builder.endObject(); + } + } + builder.endObject(); + + return builder; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java similarity index 61% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreResponse.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java index 3d6c5f5aaca5e..12eb20617ff0f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java @@ -3,26 +3,34 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import com.carrotsearch.hppc.ObjectIntHashMap; + import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.graph.action.Connection.ConnectionId; -import org.elasticsearch.xpack.core.graph.action.Vertex.VertexId; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; +import org.elasticsearch.protocol.xpack.graph.Connection.UnresolvedConnection; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; import java.io.IOException; import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * Graph explore response holds a graph of {@link Vertex} and {@link Connection} objects @@ -100,8 +108,7 @@ public void readFrom(StreamInput in) throws IOException { connections = new HashMap<>(); for (int i = 0; i < size; i++) { - Connection e = new Connection(); - e.readFrom(in, vertices); + Connection e = new Connection(in, vertices); connections.put(e.getId(), e); } @@ -146,23 +153,19 @@ public void writeTo(StreamOutput out) throws IOException { } - static final class Fields { - static final String TOOK = "took"; - static final String TIMED_OUT = "timed_out"; - static final String INDICES = "_indices"; - static final String FAILURES = "failures"; - static final String VERTICES = "vertices"; - static final String CONNECTIONS = "connections"; - - } + private static final ParseField TOOK = new ParseField("took"); + private static final ParseField TIMED_OUT = new ParseField("timed_out"); + private static final ParseField VERTICES = new ParseField("vertices"); + private static final ParseField CONNECTIONS = new ParseField("connections"); + private static final ParseField FAILURES = new ParseField("failures"); @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(Fields.TOOK, tookInMillis); - builder.field(Fields.TIMED_OUT, timedOut); + builder.field(TOOK.getPreferredName(), tookInMillis); + builder.field(TIMED_OUT.getPreferredName(), timedOut); - builder.startArray(Fields.FAILURES); + builder.startArray(FAILURES.getPreferredName()); if (shardFailures != null) { for (ShardOperationFailedException shardFailure : shardFailures) { builder.startObject(); @@ -178,7 +181,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws extraParams.put(RETURN_DETAILED_INFO_PARAM, Boolean.toString(returnDetailedInfo)); Params extendedParams = new DelegatingMapParams(extraParams, params); - builder.startArray(Fields.VERTICES); + builder.startArray(VERTICES.getPreferredName()); for 
(Vertex vertex : vertices.values()) { builder.startObject(); vertexNumbers.put(vertex, vertexNumbers.size()); @@ -187,7 +190,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endArray(); - builder.startArray(Fields.CONNECTIONS); + builder.startArray(CONNECTIONS.getPreferredName()); for (Connection connection : connections.values()) { builder.startObject(); connection.toXContent(builder, extendedParams, vertexNumbers); @@ -198,5 +201,48 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + private static final ConstructingObjectParser<GraphExploreResponse, Void> PARSER = new ConstructingObjectParser<>( + "GraphExploreResponseParser", true, + args -> { + GraphExploreResponse result = new GraphExploreResponse(); + result.vertices = new HashMap<>(); + result.connections = new HashMap<>(); + + result.tookInMillis = (Long) args[0]; + result.timedOut = (Boolean) args[1]; + + @SuppressWarnings("unchecked") + List<Vertex> vertices = (List<Vertex>) args[2]; + @SuppressWarnings("unchecked") + List<UnresolvedConnection> unresolvedConnections = (List<UnresolvedConnection>) args[3]; + @SuppressWarnings("unchecked") + List<ShardSearchFailure> failures = (List<ShardSearchFailure>) args[4]; + for (Vertex vertex : vertices) { + // reverse-engineer if detailed stats were requested - + // mainly here for testing framework's equality tests + result.returnDetailedInfo = result.returnDetailedInfo || vertex.getFg() > 0; + result.vertices.put(vertex.getId(), vertex); + } + for (UnresolvedConnection unresolvedConnection : unresolvedConnections) { + Connection resolvedConnection = unresolvedConnection.resolve(vertices); + result.connections.put(resolvedConnection.getId(), resolvedConnection); + } + if (failures.size() > 0) { + result.shardFailures = failures.toArray(new ShardSearchFailure[failures.size()]); + } + return result; + }); + + static { + PARSER.declareLong(constructorArg(), TOOK); + PARSER.declareBoolean(constructorArg(), TIMED_OUT); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> Vertex.fromXContent(p), VERTICES); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> UnresolvedConnection.fromXContent(p), CONNECTIONS); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.fromXContent(p), FAILURES); + } + + public static GraphExploreResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Hop.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java similarity index 85% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Hop.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java index 8ba7005f15fcf..e61403e8b37a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Hop.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java @@ -3,12 +3,14 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
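The lenient parser defined above for `GraphExploreResponse` can be exercised directly; a sketch assuming a hypothetical minimal payload (`getTookInMillis` is an existing accessor on the response, outside this hunk):

```java
String json = "{\"took\": 35, \"timed_out\": false, \"failures\": [],"
        + " \"vertices\": [{\"field\": \"name\", \"term\": \"kimchy\","
        + " \"weight\": 1.0, \"depth\": 0}], \"connections\": []}";
try (XContentParser parser = JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY,
        DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    GraphExploreResponse response = GraphExploreResponse.fromXContent(parser);
    assert response.getTookInMillis() == 35;
}
```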
*/ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -41,7 +43,7 @@ *
<p>
* */ -public class Hop +public class Hop implements ToXContentFragment { final Hop parentHop; List<VertexRequest> vertices = null; QueryBuilder guidingQuery = null; @@ -139,4 +141,20 @@ public int getNumberVertexRequests() { public VertexRequest getVertexRequest(int requestNumber) { return getEffectiveVertexRequests().get(requestNumber); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (guidingQuery != null) { + builder.field("query"); + guidingQuery.toXContent(builder, params); + } + if (vertices != null && vertices.size() > 0) { + builder.startArray("vertices"); + for (VertexRequest vertexRequest : vertices) { + vertexRequest.toXContent(builder, params); + } + builder.endArray(); + } + return builder; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Vertex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java similarity index 65% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Vertex.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java index c85d6d7dfd6e1..f17812a6396a0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Vertex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java @@ -3,14 +3,21 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * A vertex in a graph response represents a single term (a field and value pair) @@ -27,6 +34,13 @@ public class Vertex implements ToXContentFragment { private final int depth; private final long bg; private long fg; + private static final ParseField FIELD = new ParseField("field"); + private static final ParseField TERM = new ParseField("term"); + private static final ParseField WEIGHT = new ParseField("weight"); + private static final ParseField DEPTH = new ParseField("depth"); + private static final ParseField FG = new ParseField("fg"); + private static final ParseField BG = new ParseField("bg"); + + public Vertex(String field, String term, double weight, int depth, long bg, long fg) { super(); @@ -50,20 +64,72 @@ void writeTo(StreamOutput out) throws IOException { out.writeVLong(bg); out.writeVLong(fg); } + + @Override + public int hashCode() { + return Objects.hash(field, term, weight, depth, bg, fg); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Vertex other = (Vertex) obj; + return depth == other.depth && + weight == other.weight && + bg == other.bg && + fg == other.fg && + 
Objects.equals(field, other.field) && + Objects.equals(term, other.term); + + } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { boolean returnDetailedInfo = params.paramAsBoolean(GraphExploreResponse.RETURN_DETAILED_INFO_PARAM, false); - builder.field("field", field); - builder.field("term", term); - builder.field("weight", weight); - builder.field("depth", depth); + builder.field(FIELD.getPreferredName(), field); + builder.field(TERM.getPreferredName(), term); + builder.field(WEIGHT.getPreferredName(), weight); + builder.field(DEPTH.getPreferredName(), depth); if (returnDetailedInfo) { - builder.field("fg", fg); - builder.field("bg", bg); + builder.field(FG.getPreferredName(), fg); + builder.field(BG.getPreferredName(), bg); } return builder; } + + + private static final ConstructingObjectParser<Vertex, Void> PARSER = new ConstructingObjectParser<>( + "VertexParser", true, + args -> { + String field = (String) args[0]; + String term = (String) args[1]; + double weight = (Double) args[2]; + int depth = (Integer) args[3]; + Long optionalBg = (Long) args[4]; + Long optionalFg = (Long) args[5]; + long bg = optionalBg == null ? 0 : optionalBg; + long fg = optionalFg == null ? 0 : optionalFg; + return new Vertex(field, term, weight, depth, bg, fg); + }); + + static { + PARSER.declareString(constructorArg(), FIELD); + PARSER.declareString(constructorArg(), TERM); + PARSER.declareDouble(constructorArg(), WEIGHT); + PARSER.declareInt(constructorArg(), DEPTH); + PARSER.declareLong(optionalConstructorArg(), BG); + PARSER.declareLong(optionalConstructorArg(), FG); + } + + static Vertex fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + /** * @return a {@link VertexId} object that uniquely identifies this Vertex diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/VertexRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java similarity index 78% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/VertexRequest.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java index f7f7dec4b1722..63d2c616547d4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/VertexRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java @@ -3,11 +3,13 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.graph.action; +package org.elasticsearch.protocol.xpack.graph; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest.TermBoost; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; import java.io.IOException; import java.util.HashMap; @@ -21,9 +23,10 @@ * inclusion list to filter which terms are considered. 
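A sketch of how such a request is typically populated, assuming the builder-style setters `VertexRequest` already exposes (`fieldName`, `size`, `minDocCount`, `addInclude`, `addExclude`, all outside this hunk):

```java
VertexRequest vr = new VertexRequest()
        .fieldName("country")
        .size(10)        // top terms to return for this field
        .minDocCount(5); // ignore terms seen in fewer than 5 docs
// Include clauses whitelist terms (with a boost); excludes suppress them.
vr.addInclude("france", 2.0f);
vr.addExclude("england");
// With the toXContent below this renders roughly as:
// {"field":"country","size":10,"min_doc_count":5,
//  "include":[{"term":"france","boost":2.0}],"exclude":["england"]}
```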
* */ -public class VertexRequest { +public class VertexRequest implements ToXContentObject { private String fieldName; - private int size = 5; + private int size = DEFAULT_SIZE; + public static final int DEFAULT_SIZE = 5; private Map<String, TermBoost> includes; private Set<String> excludes; public static final int DEFAULT_MIN_DOC_COUNT = 3; @@ -195,4 +198,38 @@ public VertexRequest shardMinDocCount(int value) { return this; } + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("field", fieldName); + if (size != DEFAULT_SIZE) { + builder.field("size", size); + } + if (minDocCount != DEFAULT_MIN_DOC_COUNT) { + builder.field("min_doc_count", minDocCount); + } + if (shardMinDocCount != DEFAULT_SHARD_MIN_DOC_COUNT) { + builder.field("shard_min_doc_count", shardMinDocCount); + } + if (includes != null) { + builder.startArray("include"); + for (TermBoost tb : includes.values()) { + builder.startObject(); + builder.field("term", tb.term); + builder.field("boost", tb.boost); + builder.endObject(); + } + builder.endArray(); + } + if (excludes != null) { + builder.startArray("exclude"); + for (String value : excludes) { + builder.value(value); + } + builder.endArray(); + } + builder.endObject(); + return builder; + } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java new file mode 100644 index 0000000000000..5d5dd0f5ef61d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for the default distribution's Graph + * APIs. + */ +package org.elasticsearch.protocol.xpack.graph; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/DeleteLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/DeleteLicenseRequest.java new file mode 100644 index 0000000000000..62353b093b5b5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/DeleteLicenseRequest.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; + + +public class DeleteLicenseRequest extends AcknowledgedRequest<DeleteLicenseRequest> { + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java new file mode 100644 index 0000000000000..926ce1d1d705b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + + +public class GetLicenseRequest extends MasterNodeReadRequest<GetLicenseRequest> { + + public GetLicenseRequest() { + } + + public GetLicenseRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java new file mode 100644 index 0000000000000..6d5e1b5653fe7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseResponse.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionResponse; + +public class GetLicenseResponse extends ActionResponse { + + private String license; + + GetLicenseResponse() { + } + + public GetLicenseResponse(String license) { + this.license = license; + } + + public String getLicenseDefinition() { + return license; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicenseStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicenseStatus.java new file mode 100644 index 0000000000000..5bc66ab745e49 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicenseStatus.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import java.io.IOException; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +/** + * Status of an X-Pack license. 
+ */ +public enum LicenseStatus implements Writeable { + + ACTIVE("active"), + INVALID("invalid"), + EXPIRED("expired"); + + private final String label; + + LicenseStatus(String label) { + this.label = label; + } + + public String label() { + return label; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(label); + } + + public static LicenseStatus readFrom(StreamInput in) throws IOException { + return fromString(in.readString()); + } + + public static LicenseStatus fromString(String value) { + switch (value) { + case "active": + return ACTIVE; + case "invalid": + return INVALID; + case "expired": + return EXPIRED; + default: + throw new IllegalArgumentException("unknown license status [" + value + "]"); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicensesStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicensesStatus.java new file mode 100644 index 0000000000000..18745653e761e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/LicensesStatus.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import java.util.Locale; + +public enum LicensesStatus { + VALID((byte) 0), + INVALID((byte) 1), + EXPIRED((byte) 2); + + private final byte id; + + LicensesStatus(byte id) { + this.id = id; + } + + public int id() { + return id; + } + + public static LicensesStatus fromId(int id) { + if (id == 0) { + return VALID; + } else if (id == 1) { + return INVALID; + } else if (id == 2) { + return EXPIRED; + } else { + throw new IllegalStateException("no valid LicensesStatus for id=" + id); + } + } + + + @Override + public String toString() { + return this.name().toLowerCase(Locale.ROOT); + } + + public static LicensesStatus fromString(String value) { + switch (value) { + case "valid": + return VALID; + case "invalid": + return INVALID; + case "expired": + return EXPIRED; + default: + throw new IllegalArgumentException("unknown licenses status [" + value + "]"); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseRequest.java new file mode 100644 index 0000000000000..342e6c296e7ed --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseRequest.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
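Before the license request/response classes, a quick sketch of the label-based wire format `LicenseStatus` defines above (a test-style fragment; assumes `BytesStreamOutput` from the common io.stream package and a caller that may throw `IOException`):

```java
try (BytesStreamOutput out = new BytesStreamOutput()) {
    // The status is serialized as its string label ("active"), not an ordinal...
    LicenseStatus.ACTIVE.writeTo(out);
    try (StreamInput in = out.bytes().streamInput()) {
        // ...so readFrom can rebuild the constant via fromString.
        assert LicenseStatus.readFrom(in) == LicenseStatus.ACTIVE;
    }
}
```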
+ */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; + +public class PutLicenseRequest extends AcknowledgedRequest<PutLicenseRequest> { + + private String licenseDefinition; + private boolean acknowledge = false; + + public PutLicenseRequest() { + + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public void setLicenseDefinition(String licenseDefinition) { + this.licenseDefinition = licenseDefinition; + } + + public String getLicenseDefinition() { + return licenseDefinition; + } + + public void setAcknowledge(boolean acknowledge) { + this.acknowledge = acknowledge; + } + + public boolean isAcknowledge() { + return acknowledge; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponse.java new file mode 100644 index 0000000000000..206c5a3b38366 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponse.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.common.ProtocolUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class PutLicenseResponse extends AcknowledgedResponse { + + private static final ConstructingObjectParser<PutLicenseResponse, Void> PARSER = new ConstructingObjectParser<>( + "put_license_response", true, (a, v) -> { + boolean acknowledged = (Boolean) a[0]; + LicensesStatus licensesStatus = LicensesStatus.fromString((String) a[1]); + @SuppressWarnings("unchecked") Tuple<String, Map<String, String[]>> acknowledgements = (Tuple<String, Map<String, String[]>>) a[2]; + if (acknowledgements == null) { + return new PutLicenseResponse(acknowledged, licensesStatus); + } else { + return new PutLicenseResponse(acknowledged, licensesStatus, acknowledgements.v1(), acknowledgements.v2()); + } + + }); + + static { + PARSER.declareBoolean(constructorArg(), new ParseField("acknowledged")); + PARSER.declareString(constructorArg(), new ParseField("license_status")); + PARSER.declareObject(optionalConstructorArg(), (parser, v) -> { + Map<String, String[]> acknowledgeMessages = new HashMap<>(); + String message = null; + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != 
XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else { + if (currentFieldName == null) { + throw new XContentParseException(parser.getTokenLocation(), "expected message header or acknowledgement"); + } + if ("message".equals(currentFieldName)) { + if (token != XContentParser.Token.VALUE_STRING) { + throw new XContentParseException(parser.getTokenLocation(), "unexpected message header type"); + } + message = parser.text(); + } else { + if (token != XContentParser.Token.START_ARRAY) { + throw new XContentParseException(parser.getTokenLocation(), "unexpected acknowledgement type"); + } + List<String> acknowledgeMessagesList = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token != XContentParser.Token.VALUE_STRING) { + throw new XContentParseException(parser.getTokenLocation(), "unexpected acknowledgement text"); + } + acknowledgeMessagesList.add(parser.text()); + } + acknowledgeMessages.put(currentFieldName, acknowledgeMessagesList.toArray(new String[0])); + } + } + } + return new Tuple<>(message, acknowledgeMessages); + }, + new ParseField("acknowledge")); + } + + private LicensesStatus status; + private Map<String, String[]> acknowledgeMessages; + private String acknowledgeHeader; + + public PutLicenseResponse() { + } + + public PutLicenseResponse(boolean acknowledged, LicensesStatus status) { + this(acknowledged, status, null, Collections.emptyMap()); + } + + public PutLicenseResponse(boolean acknowledged, LicensesStatus status, String acknowledgeHeader, + Map<String, String[]> acknowledgeMessages) { + super(acknowledged); + this.status = status; + this.acknowledgeHeader = acknowledgeHeader; + this.acknowledgeMessages = acknowledgeMessages; + } + + public LicensesStatus status() { + return status; + } + + public Map<String, String[]> acknowledgeMessages() { + return acknowledgeMessages; + } + + public String acknowledgeHeader() { + return acknowledgeHeader; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + status = LicensesStatus.fromId(in.readVInt()); + acknowledgeHeader = in.readOptionalString(); + int size = in.readVInt(); + Map<String, String[]> acknowledgeMessages = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String feature = in.readString(); + int nMessages = in.readVInt(); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = in.readString(); + } + acknowledgeMessages.put(feature, messages); + } + this.acknowledgeMessages = acknowledgeMessages; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(status.id()); + out.writeOptionalString(acknowledgeHeader); + out.writeVInt(acknowledgeMessages.size()); + for (Map.Entry<String, String[]> entry : acknowledgeMessages.entrySet()) { + out.writeString(entry.getKey()); + out.writeVInt(entry.getValue().length); + for (String message : entry.getValue()) { + out.writeString(message); + } + } + } + + @Override + protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { + builder.field("license_status", status.toString()); + if (!acknowledgeMessages.isEmpty()) { + builder.startObject("acknowledge"); + builder.field("message", acknowledgeHeader); + for (Map.Entry<String, String[]> entry : acknowledgeMessages.entrySet()) { + builder.startArray(entry.getKey()); + for (String message : entry.getValue()) { + builder.value(message); + } + builder.endArray(); + } + builder.endObject(); + } + } + + @Override + public String 
toString() { + return Strings.toString(this, true, true); + } + + public static PutLicenseResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + PutLicenseResponse that = (PutLicenseResponse) o; + + return status == that.status && + ProtocolUtils.equals(acknowledgeMessages, that.acknowledgeMessages) && + Objects.equals(acknowledgeHeader, that.acknowledgeHeader); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), status, ProtocolUtils.hashCode(acknowledgeMessages), acknowledgeHeader); + } + + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/package-info.java new file mode 100644 index 0000000000000..a0a80a9958b95 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for the default distribution's License + * APIs. + */ +package org.elasticsearch.protocol.xpack.license; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java new file mode 100644 index 0000000000000..17afee59fa156 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.migration; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +public class IndexUpgradeInfoRequest extends MasterNodeReadRequest<IndexUpgradeInfoRequest> implements IndicesRequest.Replaceable { + + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true); + + public IndexUpgradeInfoRequest(String... 
indices) { + indices(indices); + } + + public IndexUpgradeInfoRequest(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndexUpgradeInfoRequest indices(String... indices) { + this.indices = Objects.requireNonNull(indices, "indices cannot be null"); + return this; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public void indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IndexUpgradeInfoRequest request = (IndexUpgradeInfoRequest) o; + return Arrays.equals(indices, request.indices) && + Objects.equals(indicesOptions.toString(), request.indicesOptions.toString()); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices), indicesOptions.toString()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java new file mode 100644 index 0000000000000..17115ac9b1711 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.migration; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public class IndexUpgradeInfoResponse extends ActionResponse implements ToXContentObject { + + private static final ParseField INDICES = new ParseField("indices"); + private static final ParseField ACTION_REQUIRED = new ParseField("action_required"); + + private static final ConstructingObjectParser<IndexUpgradeInfoResponse, Void> PARSER = + new ConstructingObjectParser<>("IndexUpgradeInfoResponse", + true, + (a, c) -> { + @SuppressWarnings("unchecked") + Map<String, Object> map = (Map<String, Object>) a[0]; + Map<String, UpgradeActionRequired> actionsRequired = map.entrySet().stream() + .filter(e -> { + if (e.getValue() instanceof Map == false) { + return false; + } + @SuppressWarnings("unchecked") + Map<String, Object> value = (Map<String, Object>) e.getValue(); + return value.containsKey(ACTION_REQUIRED.getPreferredName()); + }) + .collect(Collectors.toMap( + Map.Entry::getKey, + e -> { + @SuppressWarnings("unchecked") + Map<String, Object> value = (Map<String, Object>) e.getValue(); + return UpgradeActionRequired.fromString((String) value.get(ACTION_REQUIRED.getPreferredName())); + } + )); + return new IndexUpgradeInfoResponse(actionsRequired); + }); + + static { + PARSER.declareObject(constructorArg(), (p, c) -> p.map(), INDICES); + } + + + private Map<String, UpgradeActionRequired> actions; + + public IndexUpgradeInfoResponse() { + + } + + public IndexUpgradeInfoResponse(Map<String, UpgradeActionRequired> actions) { + this.actions = actions; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + actions = in.readMap(StreamInput::readString, UpgradeActionRequired::readFromStream); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(actions, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); + } + + public Map<String, UpgradeActionRequired> getActions() { + return actions; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.startObject(INDICES.getPreferredName()); + for (Map.Entry<String, UpgradeActionRequired> entry : actions.entrySet()) { + builder.startObject(entry.getKey()); + { + builder.field(ACTION_REQUIRED.getPreferredName(), entry.getValue().toString()); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IndexUpgradeInfoResponse response = (IndexUpgradeInfoResponse) o; + return Objects.equals(actions, response.actions); + } + + @Override + public int hashCode() { + return Objects.hash(actions); + } + + public static IndexUpgradeInfoResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/UpgradeActionRequired.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/UpgradeActionRequired.java new file mode 100644 index 0000000000000..dce1c7d18f50e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/UpgradeActionRequired.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.migration; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +/** + * Indicates the type of the upgrade required for the index + */ +public enum UpgradeActionRequired implements Writeable { + NOT_APPLICABLE, // Indicates that the check is not applicable to this index type; the next check will be performed + UP_TO_DATE, // Indicates that the check finds this index to be up to date - no additional checks are required + REINDEX, // The index should be reindexed + UPGRADE; // The index should go through the upgrade procedure + + public static UpgradeActionRequired fromString(String value) { + return UpgradeActionRequired.valueOf(value.toUpperCase(Locale.ROOT)); + } + + public static UpgradeActionRequired readFromStream(StreamInput in) throws IOException { + return in.readEnum(UpgradeActionRequired.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/package-info.java new file mode 100644 index 0000000000000..7c52f6a8fd4f1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/migration/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for the default distribution's Migration + * APIs. + */ +package org.elasticsearch.protocol.xpack.migration; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/package-info.java new file mode 100644 index 0000000000000..3ed877d08cccd --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/package-info.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for miscellaneous X-Pack APIs. 
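A sketch of the payload shape the migration parser above accepts (the index name is hypothetical; `createParser` may throw `IOException`):

```java
String json = "{\"indices\": {\"logs-2018\": {\"action_required\": \"upgrade\"}}}";
try (XContentParser parser = JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY,
        DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    IndexUpgradeInfoResponse response = IndexUpgradeInfoResponse.fromXContent(parser);
    // Index entries without an "action_required" key are filtered out by the parser.
    assert response.getActions().get("logs-2018") == UpgradeActionRequired.UPGRADE;
}
```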
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/security/User.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/security/User.java new file mode 100644 index 0000000000000..16ed33ae94087 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/security/User.java @@ -0,0 +1,235 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.security; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.Map; + +/** + * An authenticated user + */ +public class User implements ToXContentObject { + + private final String username; + private final String[] roles; + private final User authenticatedUser; + private final Map<String, Object> metadata; + private final boolean enabled; + + @Nullable private final String fullName; + @Nullable private final String email; + + public User(String username, String... roles) { + this(username, roles, null, null, null, true); + } + + public User(String username, String[] roles, User authenticatedUser) { + this(username, roles, null, null, null, true, authenticatedUser); + } + + public User(User user, User authenticatedUser) { + this(user.principal(), user.roles(), user.fullName(), user.email(), user.metadata(), user.enabled(), authenticatedUser); + } + + public User(String username, String[] roles, String fullName, String email, Map<String, Object> metadata, boolean enabled) { + this(username, roles, fullName, email, metadata, enabled, null); + } + + private User(String username, String[] roles, String fullName, String email, Map<String, Object> metadata, boolean enabled, + User authenticatedUser) { + this.username = username; + this.roles = roles == null ? Strings.EMPTY_ARRAY : roles; + this.metadata = metadata != null ? Collections.unmodifiableMap(metadata) : Collections.emptyMap(); + this.fullName = fullName; + this.email = email; + this.enabled = enabled; + assert (authenticatedUser == null || authenticatedUser.isRunAs() == false) : "the authenticated user should not be a run_as user"; + this.authenticatedUser = authenticatedUser; + } + + /** + * @return The principal of this user - effectively serving as the + * unique identity of the user. + */ + public String principal() { + return this.username; + } + + /** + * @return The roles this user is associated with. The roles are + * identified by their unique names and each represents a + * set of permissions + */ + public String[] roles() { + return this.roles; + } + + /** + * @return The metadata that is associated with this user. Can never be {@code null}. + */ + public Map<String, Object> metadata() { + return metadata; + } + + /** + * @return The full name of this user. May be {@code null}. + */ + public String fullName() { + return fullName; + } + + /** + * @return The email of this user. May be {@code null}.
+ */ + public String email() { + return email; + } + + /** + * @return whether the user is enabled or not + */ + public boolean enabled() { + return enabled; + } + + /** + * @return The user that was originally authenticated. + * This may be the user itself, or a different user which used runAs. + */ + public User authenticatedUser() { + return authenticatedUser == null ? this : authenticatedUser; + } + + /** Return true if this user was not the originally authenticated user, false otherwise. */ + public boolean isRunAs() { + return authenticatedUser != null; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("User[username=").append(username); + sb.append(",roles=[").append(Strings.arrayToCommaDelimitedString(roles)).append("]"); + sb.append(",fullName=").append(fullName); + sb.append(",email=").append(email); + sb.append(",metadata="); + sb.append(metadata); + if (authenticatedUser != null) { + sb.append(",authenticatedUser=[").append(authenticatedUser.toString()).append("]"); + } + sb.append("]"); + return sb.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o instanceof User == false) return false; + + User user = (User) o; + + if (!username.equals(user.username)) return false; + if (!Arrays.equals(roles, user.roles)) return false; + if (authenticatedUser != null ? !authenticatedUser.equals(user.authenticatedUser) : user.authenticatedUser != null) return false; + if (!metadata.equals(user.metadata)) return false; + if (fullName != null ? !fullName.equals(user.fullName) : user.fullName != null) return false; + return !(email != null ? !email.equals(user.email) : user.email != null); + } + + @Override + public int hashCode() { + int result = username.hashCode(); + result = 31 * result + Arrays.hashCode(roles); + result = 31 * result + (authenticatedUser != null ? authenticatedUser.hashCode() : 0); + result = 31 * result + metadata.hashCode(); + result = 31 * result + (fullName != null ? fullName.hashCode() : 0); + result = 31 * result + (email != null ? email.hashCode() : 0); + return result; + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Fields.USERNAME.getPreferredName(), principal()); + builder.array(Fields.ROLES.getPreferredName(), roles()); + builder.field(Fields.FULL_NAME.getPreferredName(), fullName()); + builder.field(Fields.EMAIL.getPreferredName(), email()); + builder.field(Fields.METADATA.getPreferredName(), metadata()); + builder.field(Fields.ENABLED.getPreferredName(), enabled()); + return builder.endObject(); + } + + public static User partialReadFrom(String username, StreamInput input) throws IOException { + String[] roles = input.readStringArray(); + Map<String, Object> metadata = input.readMap(); + String fullName = input.readOptionalString(); + String email = input.readOptionalString(); + boolean enabled = input.readBoolean(); + User outerUser = new User(username, roles, fullName, email, metadata, enabled, null); + boolean hasInnerUser = input.readBoolean(); + if (hasInnerUser) { + User innerUser = readFrom(input); + return new User(outerUser, innerUser); + } else { + return outerUser; + } + } + + public static User readFrom(StreamInput input) throws IOException { + final boolean isInternalUser = input.readBoolean(); + assert isInternalUser == false : "should always return false.
Internal users should use the InternalUserSerializationHelper"; + final String username = input.readString(); + return partialReadFrom(username, input); + } + + public static void writeTo(User user, StreamOutput output) throws IOException { + if (user.authenticatedUser == null) { + // no backcompat necessary, since there is no inner user + writeUser(user, output); + } else { + writeUser(user, output); + output.writeBoolean(true); + writeUser(user.authenticatedUser, output); + } + output.writeBoolean(false); // last user written, regardless of bwc, does not have an inner user + } + + /** Write just the given {@link User}, but not the inner {@link #authenticatedUser}. */ + private static void writeUser(User user, StreamOutput output) throws IOException { + output.writeBoolean(false); // not a system user + output.writeString(user.username); + output.writeStringArray(user.roles); + output.writeMap(user.metadata); + output.writeOptionalString(user.fullName); + output.writeOptionalString(user.email); + output.writeBoolean(user.enabled); + } + + public interface Fields { + ParseField USERNAME = new ParseField("username"); + ParseField PASSWORD = new ParseField("password"); + ParseField PASSWORD_HASH = new ParseField("password_hash"); + ParseField ROLES = new ParseField("roles"); + ParseField FULL_NAME = new ParseField("full_name"); + ParseField EMAIL = new ParseField("email"); + ParseField METADATA = new ParseField("metadata"); + ParseField ENABLED = new ParseField("enabled"); + ParseField TYPE = new ParseField("type"); + } +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java new file mode 100644 index 0000000000000..ce627b267f31e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/security/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for the default distribution's Security + * APIs. + */ +package org.elasticsearch.protocol.xpack.security;
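A short sketch of the run-as relationship these constructors encode (user names and roles invented for illustration):

    User authenticated = new User("admin", "superuser");
    User effective = new User("alice", new String[] { "monitoring" }, authenticated);
    assert effective.isRunAs();
    assert effective.authenticatedUser() == authenticated; // the original identity is preserved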
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java new file mode 100644 index 0000000000000..4a458b69a750d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.uid.Versions; + +import java.io.IOException; + +/** + * A delete watch request to delete a watch by name (id) + */ +public class DeleteWatchRequest extends ActionRequest { + + private String id; + private long version = Versions.MATCH_ANY; + + public DeleteWatchRequest() { + this(null); + } + + public DeleteWatchRequest(String id) { + this.id = id; + } + + /** + * @return The name of the watch to be deleted + */ + public String getId() { + return id; + } + + /** + * Sets the name of the watch to be deleted + */ + public void setId(String id) { + this.id = id; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (id == null) { + validationException = ValidateActions.addValidationError("watch id is missing", validationException); + } else if (PutWatchRequest.isValidId(id) == false) { + validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + version = in.readLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeLong(version); + } + + @Override + public String toString() { + return "delete [" + id + "]"; + } +}
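The validation above rejects both a missing id and an id containing whitespace; for instance:

    assert new DeleteWatchRequest("my watch").validate() != null; // "watch id contains whitespace"
    assert new DeleteWatchRequest("my_watch").validate() == null;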
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java new file mode 100644 index 0000000000000..39cd5e966fa12 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class DeleteWatchResponse extends ActionResponse implements ToXContentObject { + + private static final ObjectParser<DeleteWatchResponse, Void> PARSER + = new ObjectParser<>("x_pack_delete_watch_response", DeleteWatchResponse::new); + static { + PARSER.declareString(DeleteWatchResponse::setId, new ParseField("_id")); + PARSER.declareLong(DeleteWatchResponse::setVersion, new ParseField("_version")); + PARSER.declareBoolean(DeleteWatchResponse::setFound, new ParseField("found")); + } + + private String id; + private long version; + private boolean found; + + public DeleteWatchResponse() { + } + + public DeleteWatchResponse(String id, long version, boolean found) { + this.id = id; + this.version = version; + this.found = found; + } + + public String getId() { + return id; + } + + public long getVersion() { + return version; + } + + public boolean isFound() { + return found; + } + + private void setId(String id) { + this.id = id; + } + + private void setVersion(long version) { + this.version = version; + } + + private void setFound(boolean found) { + this.found = found; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + DeleteWatchResponse that = (DeleteWatchResponse) o; + + return Objects.equals(id, that.id) && Objects.equals(version, that.version) && Objects.equals(found, that.found); + } + + @Override + public int hashCode() { + return Objects.hash(id, version, found); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + version = in.readVLong(); + found = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeVLong(version); + out.writeBoolean(found); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("_id", id) + .field("_version", version) + .field("found", found) + .endObject(); + } + + public static DeleteWatchResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java new file mode 100644 index 0000000000000..7997d853db37a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.util.regex.Pattern; + +/** + * This request class contains the data needed to create a watch along with the name of the watch. + * The name of the watch will become the ID of the indexed document. + */ +public final class PutWatchRequest extends ActionRequest { + + private static final Pattern NO_WS_PATTERN = Pattern.compile("\\S+"); + + private String id; + private BytesReference source; + private XContentType xContentType = XContentType.JSON; + private boolean active = true; + private long version = Versions.MATCH_ANY; + + public PutWatchRequest() {} + + public PutWatchRequest(StreamInput in) throws IOException { + readFrom(in); + } + + public PutWatchRequest(String id, BytesReference source, XContentType xContentType) { + this.id = id; + this.source = source; + this.xContentType = xContentType; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + source = in.readBytesReference(); + active = in.readBoolean(); + xContentType = in.readEnum(XContentType.class); + version = in.readZLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeBytesReference(source); + out.writeBoolean(active); + out.writeEnum(xContentType); + out.writeZLong(version); + } + + /** + * @return The name that will be the ID of the indexed document + */ + public String getId() { + return id; + } + + /** + * Set the watch name + */ + public void setId(String id) { + this.id = id; + } + + /** + * @return The source of the watch + */ + public BytesReference getSource() { + return source; + } + + /** + * Set the source of the watch + */ + public void setSource(BytesReference source, XContentType xContentType) { + this.source = source; + this.xContentType = xContentType; + } + + /** + * @return The initial active state of the watch (defaults to {@code true}, e.g. 
"active") + */ + public boolean isActive() { + return active; + } + + /** + * Sets the initial active state of the watch + */ + public void setActive(boolean active) { + this.active = active; + } + + /** + * Get the content type for the source + */ + public XContentType xContentType() { + return xContentType; + } + + public long getVersion() { + return version; + } + + public void setVersion(long version) { + this.version = version; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (id == null) { + validationException = ValidateActions.addValidationError("watch id is missing", validationException); + } else if (isValidId(id) == false) { + validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException); + } + if (source == null) { + validationException = ValidateActions.addValidationError("watch source is missing", validationException); + } + if (xContentType == null) { + validationException = ValidateActions.addValidationError("request body is missing", validationException); + } + return validationException; + } + + public static boolean isValidId(String id) { + return Strings.isEmpty(id) == false && NO_WS_PATTERN.matcher(id).matches(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java new file mode 100644 index 0000000000000..f6e55ff555339 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class PutWatchResponse extends ActionResponse implements ToXContentObject { + + private static final ObjectParser<PutWatchResponse, Void> PARSER + = new ObjectParser<>("x_pack_put_watch_response", PutWatchResponse::new); + static { + PARSER.declareString(PutWatchResponse::setId, new ParseField("_id")); + PARSER.declareLong(PutWatchResponse::setVersion, new ParseField("_version")); + PARSER.declareBoolean(PutWatchResponse::setCreated, new ParseField("created")); + } + + private String id; + private long version; + private boolean created; + + public PutWatchResponse() { + } + + public PutWatchResponse(String id, long version, boolean created) { + this.id = id; + this.version = version; + this.created = created; + } + + private void setId(String id) { + this.id = id; + } + + private void setVersion(long version) { + this.version = version; + } + + private void setCreated(boolean created) { + this.created = created; + } + + public String getId() { + return id; + } + + public long getVersion() { + return version; + } + + public boolean isCreated() { + return created; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + PutWatchResponse that = (PutWatchResponse) o; + + return Objects.equals(id, that.id) && Objects.equals(version, that.version) && Objects.equals(created, that.created); + } + + @Override + public int hashCode() { + return Objects.hash(id, version, created); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeVLong(version); + out.writeBoolean(created); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + version = in.readVLong(); + created = in.readBoolean(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("_id", id) + .field("_version", version) + .field("created", created) + .endObject(); + } + + public static PutWatchResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/package-info.java new file mode 100644 index 0000000000000..0d9edf3b5c035 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Request and Response objects for the default distribution's Watcher + * APIs. + */ +package org.elasticsearch.protocol.xpack.watcher;
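A usage sketch for the put-watch request/response pair above (the watch id is invented, and the watch body is elided rather than a real definition; validate() does not parse the source):

    PutWatchRequest request = new PutWatchRequest("my_watch",
            new BytesArray("{}"), XContentType.JSON);
    request.setActive(false); // store the watch without scheduling it
    assert request.validate() == null;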
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java index 5503eb692558b..e4fd8d0435106 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.graph.action; import org.elasticsearch.action.Action; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; public class GraphExploreAction extends Action<GraphExploreResponse> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java index d5e756f78a20e..37456f234648a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java @@ -11,6 +11,9 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.protocol.xpack.graph.Hop; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index e0b71abe966db..193695ac69362 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -115,7 +115,7 @@ public Set<String> expandDatafeedIds(String expression, boolean allowNoDatafeeds @Override public Version getMinimalSupportedVersion() { - return Version.V_5_4_0; + return Version.V_6_0_0_alpha1; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java index fb3ac55cda027..73cdbeef44259 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -72,18 +71,14 @@ public ActionRequestValidationException validate() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); datafeedId = in.readString(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - force = in.readBoolean(); - } + force = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out); out.writeString(datafeedId); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeBoolean(force); - } + out.writeBoolean(force); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java index 933e98b80ff80..56b7ec2b52fc1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -79,18 +78,14 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, public void readFrom(StreamInput in) throws IOException { super.readFrom(in); jobId = in.readString(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - force = in.readBoolean(); - } + force = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(jobId); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeBoolean(force); - } + out.writeBoolean(force); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java index ef086b5126228..4b96a4d6b2746 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.tasks.BaseTasksResponse; @@ -127,9 +126,7 @@ public void readFrom(StreamInput in) throws IOException { start = in.readOptionalString(); end = in.readOptionalString(); advanceTime = in.readOptionalString(); - if (in.getVersion().after(Version.V_5_5_0)) { - skipTime = in.readOptionalString(); - } + skipTime = in.readOptionalString(); } @Override @@ -139,9 +136,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(start); out.writeOptionalString(end); out.writeOptionalString(advanceTime); - if (out.getVersion().after(Version.V_5_5_0)) { - out.writeOptionalString(skipTime); - } + out.writeOptionalString(skipTime); } @Override @@ -222,18 +217,14 @@ public Date getLastFinalizedBucketEnd() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); flushed = in.readBoolean(); - if (in.getVersion().after(Version.V_5_5_0)) { - lastFinalizedBucketEnd = new Date(in.readVLong()); - } + lastFinalizedBucketEnd = new Date(in.readVLong()); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(flushed); - if (out.getVersion().after(Version.V_5_5_0)) { - out.writeVLong(lastFinalizedBucketEnd.getTime()); - } + out.writeVLong(lastFinalizedBucketEnd.getTime()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java index 29b3d4bb8d557..c6c87ef0e465d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; @@ -162,7 +161,7 @@ public PageParams getPageParams() { public void setPageParams(PageParams pageParams) { if (timestamp != null) { - throw new IllegalArgumentException("Param [" + PageParams.FROM.getPreferredName() + throw new IllegalArgumentException("Param [" + PageParams.FROM.getPreferredName() + ", " + PageParams.SIZE.getPreferredName() + "] is incompatible with [" + TIMESTAMP.getPreferredName() + "]."); } this.pageParams = ExceptionsHelper.requireNonNull(pageParams, PageParams.PAGE.getPreferredName()); @@ -212,10 +211,8 @@ public void readFrom(StreamInput in) throws IOException { end = in.readOptionalString(); anomalyScore = in.readOptionalDouble(); pageParams = in.readOptionalWriteable(PageParams::new); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - sort = in.readString(); - descending = in.readBoolean(); - } + sort = in.readString(); + descending = in.readBoolean(); } @Override @@ -229,10 +226,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(end); out.writeOptionalDouble(anomalyScore); out.writeOptionalWriteable(pageParams); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeString(sort); - out.writeBoolean(descending); - } + out.writeString(sort); + out.writeBoolean(descending); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java index c108a983aa17b..fc38d974defff 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java @@ -168,10 +168,6 @@ public JobParams(String jobId) { public JobParams(StreamInput in) throws IOException { jobId = in.readString(); - if (in.getVersion().onOrBefore(Version.V_5_5_0)) { - // Read `ignoreDowntime` - in.readBoolean(); - } timeout = TimeValue.timeValueMillis(in.readVLong()); } @@ -199,10 +195,6 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); - if (out.getVersion().onOrBefore(Version.V_5_5_0)) { - // Write `ignoreDowntime` - true by default - out.writeBoolean(true); - } out.writeVLong(timeout.millis()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index 1034b00af0a34..cdf25438cea33 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -189,10 +189,6 @@ public DatafeedConfig(StreamInput in) throws IOException { this.scriptFields = null; } this.scrollSize = in.readOptionalVInt(); - if (in.getVersion().before(Version.V_5_5_0)) { - // read 
former _source field - in.readBoolean(); - } this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); if (in.getVersion().onOrAfter(Version.V_6_2_0)) { this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); @@ -290,10 +286,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } out.writeOptionalVInt(scrollSize); - if (out.getVersion().before(Version.V_5_5_0)) { - // write former _source field - out.writeBoolean(false); - } out.writeOptionalWriteable(chunkingConfig); if (out.getVersion().onOrAfter(Version.V_6_2_0)) { out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java index d894f7b339fe5..70102f27a5669 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,14 +48,6 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { DatafeedState state = this; - // STARTING & STOPPING states were introduced in v5.5. - if (out.getVersion().before(Version.V_5_5_0)) { - if (this == STARTING) { - state = STOPPED; - } else if (this == STOPPING) { - state = STARTED; - } - } out.writeEnum(state); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index f3748cefc51bc..d5425bdd1f469 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -122,10 +121,6 @@ public DatafeedUpdate(StreamInput in) throws IOException { this.scriptFields = null; } this.scrollSize = in.readOptionalVInt(); - if (in.getVersion().before(Version.V_5_5_0)) { - // TODO for former _source param - remove in v7.0.0 - in.readOptionalBoolean(); - } this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); } @@ -163,10 +158,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } out.writeOptionalVInt(scrollSize); - if (out.getVersion().before(Version.V_5_5_0)) { - // TODO for former _source param - remove in v7.0.0 - out.writeOptionalBoolean(null); - } out.writeOptionalWriteable(chunkingConfig); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java index 93aa5495c409e..b5083aeecb9ae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.ml.job.config; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -248,12 +247,7 @@ public Detector(StreamInput in) throws IOException { useNull = in.readBoolean(); excludeFrequent = in.readBoolean() ? ExcludeFrequent.readFromStream(in) : null; rules = Collections.unmodifiableList(in.readList(DetectionRule::new)); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - detectorIndex = in.readInt(); - } else { - // negative means unknown, and is expected for 5.4 jobs - detectorIndex = -1; - } + detectorIndex = in.readInt(); } @Override @@ -276,9 +270,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeList(Collections.emptyList()); } - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeInt(detectorIndex); - } + out.writeInt(detectorIndex); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index 0005d16a99c94..a978612fd02e5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -214,11 +214,7 @@ private Job(String jobId, String jobType, Version jobVersion, List group public Job(StreamInput in) throws IOException { jobId = in.readString(); jobType = in.readString(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - jobVersion = in.readBoolean() ? Version.readVersion(in) : null; - } else { - jobVersion = null; - } + jobVersion = in.readBoolean() ? Version.readVersion(in) : null; if (in.getVersion().onOrAfter(Version.V_6_1_0)) { groups = Collections.unmodifiableList(in.readList(StreamInput::readString)); } else { @@ -482,13 +478,11 @@ public long earliestValidTimestamp(DataCounts dataCounts) { public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); out.writeString(jobType); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - if (jobVersion != null) { - out.writeBoolean(true); - Version.writeVersion(jobVersion, out); - } else { - out.writeBoolean(false); - } + if (jobVersion != null) { + out.writeBoolean(true); + Version.writeVersion(jobVersion, out); + } else { + out.writeBoolean(false); } if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeStringList(groups); @@ -666,9 +660,7 @@ private static void checkValueNotLessThan(long minVal, String name, Long value) */ public static Set getCompatibleJobTypes(Version nodeVersion) { Set compatibleTypes = new HashSet<>(); - if (nodeVersion.onOrAfter(Version.V_5_4_0)) { - compatibleTypes.add(ANOMALY_DETECTOR_JOB_TYPE); - } + compatibleTypes.add(ANOMALY_DETECTOR_JOB_TYPE); return compatibleTypes; } @@ -732,9 +724,7 @@ public Builder(Job job) { public Builder(StreamInput in) throws IOException { id = in.readOptionalString(); jobType = in.readString(); - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - jobVersion = in.readBoolean() ? Version.readVersion(in) : null; - } + jobVersion = in.readBoolean() ? 
Version.readVersion(in) : null; if (in.getVersion().onOrAfter(Version.V_6_1_0)) { groups = in.readList(StreamInput::readString); } else { @@ -921,13 +911,11 @@ public List invalidCreateTimeSettings() { public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(id); out.writeString(jobType); - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - if (jobVersion != null) { - out.writeBoolean(true); - Version.writeVersion(jobVersion, out); - } else { - out.writeBoolean(false); - } + if (jobVersion != null) { + out.writeBoolean(true); + Version.writeVersion(jobVersion, out); + } else { + out.writeBoolean(false); } if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeStringList(groups); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java index e89149a062b68..948284d5e0080 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.config; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -34,10 +33,6 @@ public static JobState fromStream(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { JobState state = this; - // Pre v5.5 the OPENING state didn't exist - if (this == OPENING && out.getVersion().before(Version.V_5_5_0)) { - state = CLOSED; - } out.writeEnum(state); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java index ad8b24e66c643..2d9afa833c3c5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.process.autodetect.output; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -58,17 +57,13 @@ public FlushAcknowledgement(String id, Date lastFinalizedBucketEnd) { public FlushAcknowledgement(StreamInput in) throws IOException { id = in.readString(); - if (in.getVersion().after(Version.V_5_5_0)) { - lastFinalizedBucketEnd = new Date(in.readVLong()); - } + lastFinalizedBucketEnd = new Date(in.readVLong()); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); - if (out.getVersion().after(Version.V_5_5_0)) { - out.writeVLong(lastFinalizedBucketEnd.getTime()); - } + out.writeVLong(lastFinalizedBucketEnd.getTime()); } public String getId() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java index 03487500d8a8b..068b998dc251a 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java @@ -143,7 +143,7 @@ public ModelSnapshot(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { minVersion = Version.readVersion(in); } else { - minVersion = Version.V_5_5_0; + minVersion = Version.CURRENT.minimumCompatibilityVersion(); } timestamp = in.readBoolean() ? new Date(in.readVLong()) : null; description = in.readOptionalString(); @@ -357,9 +357,8 @@ public static class Builder { private String jobId; // Stored snapshot documents created prior to 6.3.0 will have no - // value for min_version. We default it to 5.5.0 as there were - // no model changes between 5.5.0 and 6.3.0. - private Version minVersion = Version.V_5_5_0; + // value for min_version. + private Version minVersion = Version.V_6_3_0; private Date timestamp; private String description; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java index 360bcfaaeadfd..869cdcb437e1f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -163,10 +162,6 @@ public AnomalyRecord(String jobId, Date timestamp, long bucketSpan) { @SuppressWarnings("unchecked") public AnomalyRecord(StreamInput in) throws IOException { jobId = in.readString(); - // bwc for removed sequenceNum field - if (in.getVersion().before(Version.V_5_5_0)) { - in.readInt(); - } detectorIndex = in.readInt(); probability = in.readDouble(); byFieldName = in.readOptionalString(); @@ -201,10 +196,6 @@ public AnomalyRecord(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); - // bwc for removed sequenceNum field - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeInt(0); - } out.writeInt(detectorIndex); out.writeDouble(probability); out.writeOptionalString(byFieldName); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java index 8a7fe2395b4e0..8280ee9f22ef0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java @@ -137,19 +137,11 @@ public Bucket(StreamInput in) throws IOException { anomalyScore = in.readDouble(); bucketSpan = in.readLong(); initialAnomalyScore = in.readDouble(); - // bwc for recordCount - if (in.getVersion().before(Version.V_5_5_0)) { - in.readInt(); - } records = in.readList(AnomalyRecord::new); eventCount = in.readLong(); isInterim = in.readBoolean(); bucketInfluencers = in.readList(BucketInfluencer::new); processingTimeMs = in.readLong(); - // bwc for perPartitionMaxProbability - if (in.getVersion().before(Version.V_5_5_0)) { - in.readGenericValue(); - } // bwc for 
perPartitionNormalization if (in.getVersion().before(Version.V_6_5_0)) { in.readList(Bucket::readOldPerPartitionNormalization); @@ -171,19 +163,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeDouble(anomalyScore); out.writeLong(bucketSpan); out.writeDouble(initialAnomalyScore); - // bwc for recordCount - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeInt(0); - } out.writeList(records); out.writeLong(eventCount); out.writeBoolean(isInterim); out.writeList(bucketInfluencers); out.writeLong(processingTimeMs); - // bwc for perPartitionMaxProbability - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeGenericValue(Collections.emptyMap()); - } // bwc for perPartitionNormalization if (out.getVersion().before(Version.V_6_5_0)) { out.writeList(Collections.emptyList()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java index 8b18562ec6d1e..38d76789a2ea6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -100,10 +99,6 @@ public BucketInfluencer(StreamInput in) throws IOException { isInterim = in.readBoolean(); timestamp = new Date(in.readLong()); bucketSpan = in.readLong(); - // bwc for removed sequenceNum field - if (in.getVersion().before(Version.V_5_5_0)) { - in.readInt(); - } } @Override @@ -117,10 +112,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isInterim); out.writeLong(timestamp.getTime()); out.writeLong(bucketSpan); - // bwc for removed sequenceNum field - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeInt(0); - } } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java index 97ed643c44dd5..8ee49cb88d05f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -97,10 +96,6 @@ public Influencer(StreamInput in) throws IOException { influencerScore = in.readDouble(); isInterim = in.readBoolean(); bucketSpan = in.readLong(); - // bwc for removed sequenceNum field - if (in.getVersion().before(Version.V_5_5_0)) { - in.readInt(); - } } @Override @@ -114,10 +109,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeDouble(influencerScore); out.writeBoolean(isInterim); out.writeLong(bucketSpan); - // bwc for removed sequenceNum field - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeInt(0); - } } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java index c331d8b043797..9f066b6e98ec3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java @@ -109,20 +109,7 @@ public ModelPlot(String jobId, Date timestamp, long bucketSpan, int detectorInde public ModelPlot(StreamInput in) throws IOException { jobId = in.readString(); - // timestamp isn't optional in v5.5 - if (in.getVersion().before(Version.V_5_5_0)) { - if (in.readBoolean()) { - timestamp = new Date(in.readLong()); - } else { - timestamp = new Date(); - } - } else { - timestamp = new Date(in.readLong()); - } - // bwc for removed id field - if (in.getVersion().before(Version.V_5_5_0)) { - in.readOptionalString(); - } + timestamp = new Date(in.readLong()); partitionFieldName = in.readOptionalString(); partitionFieldValue = in.readOptionalString(); overFieldName = in.readOptionalString(); @@ -138,11 +125,7 @@ public ModelPlot(StreamInput in) throws IOException { } else { actual = in.readOptionalDouble(); } - if (in.getVersion().onOrAfter(Version.V_5_5_0)) { - bucketSpan = in.readLong(); - } else { - bucketSpan = 0; - } + bucketSpan = in.readLong(); if (in.getVersion().onOrAfter(Version.V_6_1_0)) { detectorIndex = in.readInt(); } else { @@ -154,20 +137,7 @@ public ModelPlot(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); - // timestamp isn't optional in v5.5 - if (out.getVersion().before(Version.V_5_5_0)) { - boolean hasTimestamp = timestamp != null; - out.writeBoolean(hasTimestamp); - if (hasTimestamp) { - out.writeLong(timestamp.getTime()); - } - } else { - out.writeLong(timestamp.getTime()); - } - // bwc for removed id field - if (out.getVersion().before(Version.V_5_5_0)) { - out.writeOptionalString(null); - } + out.writeLong(timestamp.getTime()); out.writeOptionalString(partitionFieldName); out.writeOptionalString(partitionFieldValue); out.writeOptionalString(overFieldName); @@ -189,9 +159,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeOptionalDouble(actual); } - if (out.getVersion().onOrAfter(Version.V_5_5_0)) { - out.writeLong(bucketSpan); - } + out.writeLong(bucketSpan); if (out.getVersion().onOrAfter(Version.V_6_1_0)) { out.writeInt(detectorIndex); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java index 77dfa1cbbb1c3..281277043c829 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java @@ -211,10 +211,6 @@ public Map toAggCap() { return map; } - public Map getMetadata() { - return Collections.singletonMap(RollupField.formatMetaField(RollupField.INTERVAL), interval.toString()); - } - public void validateMappings(Map> fieldCapsResponse, ActionRequestValidationException validationException) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java index 0480050bf52f0..1e1f88a7c20e1 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistogramGroupConfig.java @@ -28,6 +28,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -115,8 +116,8 @@ public Map<String, Object> toAggCap() { return map; } - public Map<String, Object> getMetadata() { - return Collections.singletonMap(RollupField.formatMetaField(RollupField.INTERVAL), interval); + public Set<String> getAllFields() { + return Arrays.stream(fields).collect(Collectors.toSet()); } public void validateMappings(Map<String, Map<String, FieldCapabilities>> fieldCapsResponse, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java index ffc0257313b36..66a2eb358986a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java @@ -3,8 +3,13 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.core.scheduler; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -14,6 +19,7 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -89,13 +95,20 @@ public interface Schedule { } private final Map<String, ActiveSchedule> schedules = ConcurrentCollections.newConcurrentMap(); - private final ScheduledExecutorService scheduler; private final Clock clock; + private final ScheduledExecutorService scheduler; + private final Logger logger; private final List listeners = new CopyOnWriteArrayList<>(); - public SchedulerEngine(Settings settings, Clock clock) { - this.clock = clock; - this.scheduler = Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory(settings, "trigger_engine_scheduler")); + public SchedulerEngine(final Settings settings, final Clock clock) { + this(settings, clock, LogManager.getLogger(SchedulerEngine.class)); + } + + SchedulerEngine(final Settings settings, final Clock clock, final Logger logger) { + this.clock = Objects.requireNonNull(clock, "clock"); + this.scheduler = Executors.newScheduledThreadPool( + 1, EsExecutors.daemonThreadFactory(Objects.requireNonNull(settings, "settings"), "trigger_engine_scheduler")); + this.logger = Objects.requireNonNull(logger, "logger"); } public void register(Listener listener) { @@ -144,10 +157,15 @@ public int jobCount() { return schedules.size(); } - protected void notifyListeners(String name, long triggeredTime, long scheduledTime) { + protected void notifyListeners(final String name, final long triggeredTime, final long scheduledTime) { final Event event = new Event(name, triggeredTime, scheduledTime); - for (Listener listener : listeners) {
-            listener.triggered(event);
+        for (final Listener listener : listeners) {
+            try {
+                listener.triggered(event);
+            } catch (final Exception e) {
+                // do not allow exceptions to escape this method; we should continue to notify listeners and schedule the next run
+                logger.warn(new ParameterizedMessage("listener failed while handling triggered event [{}]", name), e);
+            }
         }
     }
 
@@ -169,8 +187,20 @@ class ActiveSchedule implements Runnable {
 
         @Override
         public void run() {
-            long triggeredTime = clock.millis();
-            notifyListeners(name, triggeredTime, scheduledTime);
+            final long triggeredTime = clock.millis();
+            try {
+                notifyListeners(name, triggeredTime, scheduledTime);
+            } catch (final Throwable t) {
+                /*
+                 * Allowing the throwable to escape here will lead to it being caught in FutureTask#run and set as the outcome of this
+                 * task; however, we never inspect the outcomes of these scheduled tasks and so allowing the throwable to escape
+                 * unhandled here could lead to us losing fatal errors. Instead, we rely on ExceptionsHelper#maybeDieOnAnotherThread to
+                 * appropriately dispatch any error to the uncaught exception handler. We should never see an exception here as these
+                 * do not escape from SchedulerEngine#notifyListeners.
+                 */
+                ExceptionsHelper.maybeDieOnAnotherThread(t);
+                throw t;
+            }
             scheduleNextRun(triggeredTime);
         }
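The comment in `ActiveSchedule#run` above is the heart of this change: a throwable that escapes a task submitted to a `ScheduledExecutorService` is captured by the wrapping `FutureTask` and becomes the task's outcome, so unless something calls `get()` on the future it is silently lost. A minimal, standalone sketch of that gotcha (plain JDK, no Elasticsearch classes; names are illustrative):

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class SwallowedThrowableDemo {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        Runnable failing = () -> {
            // This error is captured by the FutureTask wrapping the runnable;
            // no stack trace is printed and no uncaught exception handler runs.
            throw new AssertionError("lost unless rethrown from the task itself");
        };
        scheduler.schedule(failing, 10, TimeUnit.MILLISECONDS);
        Thread.sleep(100);
        scheduler.shutdown();
        System.out.println("scheduler exits without ever reporting the error");
    }
}
```

That is why the patch routes the throwable through `ExceptionsHelper.maybeDieOnAnotherThread(t)` before rethrowing: fatal errors reach the uncaught exception handler instead of dying quietly inside the future.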
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java
index 96c9c817182ff..82863a6e8d155 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java
@@ -167,7 +167,7 @@ public void readFrom(StreamInput in) throws IOException {
         for (int i = 0; i < indicesSize; i++) {
             indicesPrivileges.add(RoleDescriptor.IndicesPrivileges.createFrom(in));
         }
-        if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
             applicationPrivileges = in.readList(RoleDescriptor.ApplicationResourcePrivileges::createFrom);
             conditionalClusterPrivileges = ConditionalClusterPrivileges.readArray(in);
         }
@@ -185,7 +185,7 @@ public void writeTo(StreamOutput out) throws IOException {
         for (RoleDescriptor.IndicesPrivileges index : indicesPrivileges) {
             index.writeTo(out);
         }
-        if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
             out.writeStreamableList(applicationPrivileges);
             ConditionalClusterPrivileges.writeArray(out, this.conditionalClusterPrivileges);
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java
index dc43db0115e0a..4f5aed012cb11 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java
@@ -109,7 +109,7 @@ public void readFrom(StreamInput in) throws IOException {
         for (int i = 0; i < indexSize; i++) {
             indexPrivileges[i] = RoleDescriptor.IndicesPrivileges.createFrom(in);
         }
-        if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
             applicationPrivileges =
                 in.readArray(ApplicationResourcePrivileges::createFrom, ApplicationResourcePrivileges[]::new);
         }
     }
@@ -123,7 +123,7 @@ public void writeTo(StreamOutput out) throws IOException {
         for (RoleDescriptor.IndicesPrivileges priv : indexPrivileges) {
             priv.writeTo(out);
         }
-        if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
             out.writeArray(ApplicationResourcePrivileges::write, applicationPrivileges);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java
index b0711fc1bc12f..8cd8b510c6499 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java
@@ -66,7 +66,7 @@ public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         completeMatch = in.readBoolean();
         index = readResourcePrivileges(in);
-        if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
             application = in.readMap(StreamInput::readString, HasPrivilegesResponse::readResourcePrivileges);
         }
     }
@@ -87,7 +87,7 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeBoolean(completeMatch);
         writeResourcePrivileges(out, index);
-        if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
             out.writeMap(application, StreamOutput::writeString, HasPrivilegesResponse::writeResourcePrivileges);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
index 54fd8cc7974b2..69712a6f33de7 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
@@ -243,16 +243,11 @@ public static RoleDescriptor readFrom(StreamInput in) throws IOException {
         String[] runAs = in.readStringArray();
         Map<String, Object> metadata = in.readMap();
 
-        final Map<String, Object> transientMetadata;
-        if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
-            transientMetadata = in.readMap();
-        } else {
-            transientMetadata = Collections.emptyMap();
-        }
+        final Map<String, Object> transientMetadata = in.readMap();
 
         final ApplicationResourcePrivileges[] applicationPrivileges;
         final ConditionalClusterPrivilege[] conditionalClusterPrivileges;
-        if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
             applicationPrivileges = in.readArray(ApplicationResourcePrivileges::createFrom, ApplicationResourcePrivileges[]::new);
             conditionalClusterPrivileges = ConditionalClusterPrivileges.readArray(in);
         } else {
@@ -273,10 +268,8 @@ public static void writeTo(RoleDescriptor descriptor, StreamOutput out) throws I
         }
         out.writeStringArray(descriptor.runAs);
         out.writeMap(descriptor.metadata);
-        if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
-            out.writeMap(descriptor.transientMetadata);
-        }
-        if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        out.writeMap(descriptor.transientMetadata);
+        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
             out.writeArray(ApplicationResourcePrivileges::write, descriptor.applicationPrivileges);
             ConditionalClusterPrivileges.writeArray(out, descriptor.getConditionalClusterPrivileges());
         }
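All of the request and response changes above follow the same backport pattern: the application-privileges fields were first gated on `V_7_0_0_alpha1`, and once the feature landed on the 6.4 branch the gate is lowered to `V_6_4_0` on both the read and the write side. The two sides must stay mirror images, or the byte stream desynchronizes when talking to an older node. A condensed sketch of the pattern, using hypothetical names (`VersionGatedField`, `newField`) purely for illustration:

```java
import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

final class VersionGatedField {
    private VersionGatedField() {}

    static void write(StreamOutput out, String newField) throws IOException {
        // Only 6.4.0+ receivers expect the field; write nothing for older nodes.
        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
            out.writeOptionalString(newField);
        }
    }

    static String read(StreamInput in) throws IOException {
        // Mirror the writer exactly: reading a field that an old sender never
        // wrote would shift every byte that follows it.
        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
            return in.readOptionalString();
        }
        return null; // fall back to a default when the peer is older
    }
}
```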
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java
index 047758177fb0b..71e43ff5a30fe 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java
@@ -5,7 +5,6 @@
  */
 package org.elasticsearch.xpack.core.security.user;
 
-import org.elasticsearch.Version;
 import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.support.MetadataUtils;
 
@@ -16,8 +15,6 @@ public class LogstashSystemUser extends User {
 
     public static final String NAME = UsernamesField.LOGSTASH_NAME;
     public static final String ROLE_NAME = UsernamesField.LOGSTASH_ROLE;
-    public static final Version DEFINED_SINCE = Version.V_5_2_0;
-    public static final BuiltinUserInfo USER_INFO = new BuiltinUserInfo(NAME, ROLE_NAME, DEFINED_SINCE);
 
     public LogstashSystemUser(boolean enabled) {
         super(NAME, new String[]{ ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled);
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java
index bb21ddbd1a13e..c2cb5af130538 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java
@@ -229,7 +229,7 @@ public void testNewTrialDefaultsSecurityOff() {
 
     public void testOldTrialDefaultsSecurityOn() {
         XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY);
-        licenseState.update(TRIAL, true, rarely() ? null : VersionUtils.randomVersionBetween(random(), Version.V_5_6_0, Version.V_6_2_4));
+        licenseState.update(TRIAL, true, rarely() ? null : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_2_4));
 
         assertThat(licenseState.isSecurityEnabled(), is(true));
         assertThat(licenseState.isAuthAllowed(), is(true));
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java
new file mode 100644
index 0000000000000..fac99959c536a
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.protocol.xpack;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.protocol.xpack.XPackInfoResponse.BuildInfo;
+import org.elasticsearch.protocol.xpack.XPackInfoResponse.LicenseInfo;
+import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo;
+import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo.FeatureSet;
+import org.elasticsearch.protocol.xpack.license.LicenseStatus;
+import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.io.IOException;
+
+public class XPackInfoResponseTests extends AbstractStreamableXContentTestCase<XPackInfoResponse> {
+    @Override
+    protected XPackInfoResponse doParseInstance(XContentParser parser) throws IOException {
+        return XPackInfoResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected XPackInfoResponse createBlankInstance() {
+        return new XPackInfoResponse();
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return path -> path.equals("features")
+                || (path.startsWith("features") && path.endsWith("native_code_info"));
+    }
+
+    @Override
+    protected ToXContent.Params getToXContentParams() {
+        Map<String, String> params = new HashMap<>();
+        if (randomBoolean()) {
+            params.put("human", randomBoolean() ? "true" : "false");
+        }
+        if (randomBoolean()) {
+            params.put("categories", "_none");
+        }
+        return new ToXContent.MapParams(params);
+    }
+
+    @Override
+    protected XPackInfoResponse createTestInstance() {
+        return new XPackInfoResponse(
+            randomBoolean() ? null : randomBuildInfo(),
+            randomBoolean() ? null : randomLicenseInfo(),
+            randomBoolean() ? null : randomFeatureSetsInfo());
+    }
+
+    @Override
+    protected XPackInfoResponse mutateInstance(XPackInfoResponse response) {
+        @SuppressWarnings("unchecked")
+        Function<XPackInfoResponse, XPackInfoResponse> mutator = randomFrom(
+            r -> new XPackInfoResponse(
+                    mutateBuildInfo(r.getBuildInfo()),
+                    r.getLicenseInfo(),
+                    r.getFeatureSetsInfo()),
+            r -> new XPackInfoResponse(
+                    r.getBuildInfo(),
+                    mutateLicenseInfo(r.getLicenseInfo()),
+                    r.getFeatureSetsInfo()),
+            r -> new XPackInfoResponse(
+                    r.getBuildInfo(),
+                    r.getLicenseInfo(),
+                    mutateFeatureSetsInfo(r.getFeatureSetsInfo())));
+        return mutator.apply(response);
+    }
+
+    private BuildInfo randomBuildInfo() {
+        return new BuildInfo(
+            randomAlphaOfLength(10),
+            randomAlphaOfLength(15));
+    }
+
+    private BuildInfo mutateBuildInfo(BuildInfo buildInfo) {
+        if (buildInfo == null) {
+            return randomBuildInfo();
+        }
+        return null;
+    }
+
+    private LicenseInfo randomLicenseInfo() {
+        return new LicenseInfo(
+            randomAlphaOfLength(10),
+            randomAlphaOfLength(4),
+            randomAlphaOfLength(5),
+            randomFrom(LicenseStatus.values()),
+            randomLong());
+    }
+
+    private LicenseInfo mutateLicenseInfo(LicenseInfo licenseInfo) {
+        if (licenseInfo == null) {
+            return randomLicenseInfo();
+        }
+        return null;
+    }
+
+    private FeatureSetsInfo randomFeatureSetsInfo() {
+        int size = between(0, 10);
+        Set<FeatureSet> featureSets = new HashSet<>(size);
+        while (featureSets.size() < size) {
+            featureSets.add(randomFeatureSet());
+        }
+        return new FeatureSetsInfo(featureSets);
+    }
+
+    private FeatureSetsInfo mutateFeatureSetsInfo(FeatureSetsInfo featureSetsInfo) {
+        if (featureSetsInfo == null) {
+            return randomFeatureSetsInfo();
+        }
+        return null;
+    }
+
+    private FeatureSet randomFeatureSet() {
+        return new FeatureSet(
+            randomAlphaOfLength(5),
+            randomBoolean() ? null : randomAlphaOfLength(20),
+            randomBoolean(),
+            randomBoolean(),
+            randomNativeCodeInfo());
+    }
+
+    private Map<String, Object> randomNativeCodeInfo() {
+        if (randomBoolean()) {
+            return null;
+        }
+        int size = between(0, 10);
+        Map<String, Object> nativeCodeInfo = new HashMap<>(size);
+        while (nativeCodeInfo.size() < size) {
+            nativeCodeInfo.put(randomAlphaOfLength(5), randomAlphaOfLength(5));
+        }
+        return nativeCodeInfo;
+    }
+}
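The new test above leans on `AbstractStreamableXContentTestCase`, which drives the hooks the subclass provides: build a random instance, round-trip it through the wire format and through xcontent, require equality, and require that `mutateInstance` produces an unequal object so `equals()` actually discriminates. A condensed sketch of that contract (illustrative only, not the real framework code):

```java
import java.util.function.UnaryOperator;

final class RoundTripContract {
    private RoundTripContract() {}

    static <T> void check(T original, UnaryOperator<T> copy, UnaryOperator<T> mutate) {
        T copied = copy.apply(original);     // e.g. toXContent -> fromXContent
        if (!original.equals(copied) || original.hashCode() != copied.hashCode()) {
            throw new AssertionError("round trip lost information");
        }
        T mutated = mutate.apply(original);  // one randomly chosen field changed
        if (original.equals(mutated)) {
            throw new AssertionError("mutation was not visible to equals()");
        }
    }
}
```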
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/common/ProtocolUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/common/ProtocolUtilsTests.java
new file mode 100644
index 0000000000000..c4e29d7c23005
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/common/ProtocolUtilsTests.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.protocol.xpack.common;
+
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class ProtocolUtilsTests extends ESTestCase {
+
+    public void testMapStringEqualsAndHash() {
+        assertTrue(ProtocolUtils.equals(null, null));
+        assertFalse(ProtocolUtils.equals(null, new HashMap<>()));
+        assertFalse(ProtocolUtils.equals(new HashMap<>(), null));
+
+        Map<String, String[]> a = new HashMap<>();
+        a.put("foo", new String[] { "a", "b" });
+        a.put("bar", new String[] { "b", "c" });
+
+        Map<String, String[]> b = new HashMap<>();
+        b.put("foo", new String[] { "a", "b" });
+
+        assertFalse(ProtocolUtils.equals(a, b));
+        assertFalse(ProtocolUtils.equals(b, a));
+
+        b.put("bar", new String[] { "c", "b" });
+
+        assertFalse(ProtocolUtils.equals(a, b));
+        assertFalse(ProtocolUtils.equals(b, a));
+
+        b.put("bar", new String[] { "b", "c" });
+
+        assertTrue(ProtocolUtils.equals(a, b));
+        assertTrue(ProtocolUtils.equals(b, a));
+        assertEquals(ProtocolUtils.hashCode(a), ProtocolUtils.hashCode(b));
+
+        b.put("baz", new String[] { "b", "c" });
+
+        assertFalse(ProtocolUtils.equals(a, b));
+        assertFalse(ProtocolUtils.equals(b, a));
+
+        a.put("non", null);
+
+        assertFalse(ProtocolUtils.equals(a, b));
+        assertFalse(ProtocolUtils.equals(b, a));
+
+        b.put("non", null);
+        b.remove("baz");
+
+        assertTrue(ProtocolUtils.equals(a, b));
+        assertTrue(ProtocolUtils.equals(b, a));
+        assertEquals(ProtocolUtils.hashCode(a), ProtocolUtils.hashCode(b));
+    }
+}
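`ProtocolUtilsTests` exists because these acknowledge-message maps carry `String[]` values, and arrays inherit identity-based `equals`/`hashCode` from `Object`, so a plain `Map.equals` comparison is useless. A tiny demonstration of the underlying problem the `ProtocolUtils` helpers solve:

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class ArrayValueEqualityDemo {
    public static void main(String[] args) {
        Map<String, String[]> a = new HashMap<>();
        Map<String, String[]> b = new HashMap<>();
        a.put("foo", new String[] { "x" });
        b.put("foo", new String[] { "x" });
        // Map.equals compares values with String[].equals, i.e. identity:
        System.out.println(a.equals(b));                               // false
        // a value-aware comparison must unwrap each array:
        System.out.println(Arrays.equals(a.get("foo"), b.get("foo"))); // true
    }
}
```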
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java
new file mode 100644
index 0000000000000..4331bdd37807f
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java
@@ -0,0 +1,118 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.protocol.xpack.graph;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class GraphExploreResponseTests extends AbstractXContentTestCase<GraphExploreResponse> {
+
+    @Override
+    protected GraphExploreResponse createTestInstance() {
+        return createInstance(0);
+    }
+
+    private static GraphExploreResponse createInstance(int numFailures) {
+        int numItems = randomIntBetween(4, 128);
+        boolean timedOut = randomBoolean();
+        boolean showDetails = randomBoolean();
+        long overallTookInMillis = randomNonNegativeLong();
+        Map<Vertex.VertexId, Vertex> vertices = new HashMap<>();
+        Map<Connection.ConnectionId, Connection> connections = new HashMap<>();
+        ShardOperationFailedException [] failures = new ShardOperationFailedException [numFailures];
+        for (int i = 0; i < failures.length; i++) {
+            failures[i] = new ShardSearchFailure(new ElasticsearchException("an error"));
+        }
+
+        //Create random set of vertices
+        for (int i = 0; i < numItems; i++) {
+            Vertex v = new Vertex("field1", randomAlphaOfLength(5), randomDouble(), 0,
+                    showDetails?randomIntBetween(100, 200):0,
+                    showDetails?randomIntBetween(1, 100):0);
+            vertices.put(v.getId(), v);
+        }
+
+        //Wire up half the vertices randomly
+        Vertex[] vs = vertices.values().toArray(new Vertex[vertices.size()]);
+        for (int i = 0; i < numItems/2; i++) {
+            Vertex v1 = vs[randomIntBetween(0, vs.length-1)];
+            Vertex v2 = vs[randomIntBetween(0, vs.length-1)];
+            if(v1 != v2) {
+                Connection conn = new Connection(v1, v2, randomDouble(), randomLongBetween(1, 10));
+                connections.put(conn.getId(), conn);
+            }
+        }
+        return new GraphExploreResponse(overallTookInMillis, timedOut, failures, vertices, connections, showDetails);
+    }
+
+    private static GraphExploreResponse createTestInstanceWithFailures() {
+        return createInstance(randomIntBetween(1, 128));
+    }
+
+    @Override
+    protected GraphExploreResponse doParseInstance(XContentParser parser) throws IOException {
+        return GraphExploreResponse.fromXContext(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    protected Predicate<String> getRandomFieldsExcludeFilterWhenResultHasErrors() {
+        return field -> field.startsWith("responses");
+    }
+
+    @Override
+    protected void assertEqualInstances(GraphExploreResponse expectedInstance, GraphExploreResponse newInstance) {
+        assertThat(newInstance.getTook(), equalTo(expectedInstance.getTook()));
+        assertThat(newInstance.isTimedOut(), equalTo(expectedInstance.isTimedOut()));
+
+        Connection[] newConns = newInstance.getConnections().toArray(new Connection[0]);
+        Connection[] expectedConns = expectedInstance.getConnections().toArray(new Connection[0]);
+        assertArrayEquals(expectedConns, newConns);
+
+        Vertex[] newVertices = newInstance.getVertices().toArray(new Vertex[0]);
+        Vertex[] expectedVertices = expectedInstance.getVertices().toArray(new Vertex[0]);
+        assertArrayEquals(expectedVertices, newVertices);
+
+        ShardOperationFailedException[] newFailures = newInstance.getShardFailures();
+        ShardOperationFailedException[] expectedFailures =
expectedInstance.getShardFailures(); + assertEquals(expectedFailures.length, newFailures.length); + + } + + /** + * Test parsing {@link GraphExploreResponse} with inner failures as they don't support asserting on xcontent equivalence, given + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier< GraphExploreResponse> instanceSupplier = GraphExploreResponseTests::createTestInstanceWithFailures; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break. + boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, Strings.EMPTY_ARRAY, + getRandomFieldsExcludeFilterWhenResultHasErrors(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, ToXContent.EMPTY_PARAMS); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java new file mode 100644 index 0000000000000..7149477d00765 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import java.io.IOException; + +import org.elasticsearch.test.ESTestCase; + +public class LicenseStatusTests extends ESTestCase { + public void testSerialization() throws IOException { + LicenseStatus status = randomFrom(LicenseStatus.values()); + assertSame(status, copyWriteable(status, writableRegistry(), LicenseStatus::readFrom)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java new file mode 100644 index 0000000000000..a09fd6fb99b45 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Predicate; + +public class PutLicenseResponseTests extends AbstractStreamableXContentTestCase { + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // The structure of the response is such that unknown fields inside acknowledge cannot be supported since they + // are treated as messages from new services + return p -> p.startsWith("acknowledge"); + } + + @Override + protected PutLicenseResponse createTestInstance() { + boolean acknowledged = randomBoolean(); + LicensesStatus status = randomFrom(LicensesStatus.VALID, LicensesStatus.INVALID, LicensesStatus.EXPIRED); + String messageHeader; + Map ackMessages; + if (randomBoolean()) { + messageHeader = randomAlphaOfLength(10); + ackMessages = randomAckMessages(); + } else { + messageHeader = null; + ackMessages = Collections.emptyMap(); + } + + return new PutLicenseResponse(acknowledged, status, messageHeader, ackMessages); + } + + private static Map randomAckMessages() { + int nFeatures = randomIntBetween(1, 5); + + Map ackMessages = new HashMap<>(); + + for (int i = 0; i < nFeatures; i++) { + String feature = randomAlphaOfLengthBetween(9, 15); + int nMessages = randomIntBetween(1, 5); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = randomAlphaOfLengthBetween(10, 30); + } + ackMessages.put(feature, messages); + } + + return ackMessages; + } + + @Override + protected PutLicenseResponse doParseInstance(XContentParser parser) throws IOException { + return PutLicenseResponse.fromXContent(parser); + } + + @Override + protected PutLicenseResponse createBlankInstance() { + return new PutLicenseResponse(); + } + + @Override + protected PutLicenseResponse mutateInstance(PutLicenseResponse response) { + @SuppressWarnings("unchecked") + Function mutator = randomFrom( + r -> new PutLicenseResponse( + r.isAcknowledged() == false, + r.status(), + r.acknowledgeHeader(), + r.acknowledgeMessages()), + r -> new PutLicenseResponse( + r.isAcknowledged(), + mutateStatus(r.status()), + r.acknowledgeHeader(), + r.acknowledgeMessages()), + r -> { + if (r.acknowledgeMessages().isEmpty()) { + return new PutLicenseResponse( + r.isAcknowledged(), + r.status(), + randomAlphaOfLength(10), + randomAckMessages() + ); + } else { + return new PutLicenseResponse(r.isAcknowledged(), r.status()); + } + } + + ); + return mutator.apply(response); + } + + private LicensesStatus mutateStatus(LicensesStatus status) { + return randomValueOtherThan(status, () -> randomFrom(LicensesStatus.values())); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java new file mode 100644 index 0000000000000..0e09a05fb967a --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.migration; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +public class IndexUpgradeInfoRequestTests extends AbstractWireSerializingTestCase { + @Override + protected IndexUpgradeInfoRequest createTestInstance() { + int indexCount = randomInt(4); + String[] indices = new String[indexCount]; + for (int i = 0; i < indexCount; i++) { + indices[i] = randomAlphaOfLength(10); + } + IndexUpgradeInfoRequest request = new IndexUpgradeInfoRequest(indices); + if (randomBoolean()) { + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + } + return request; + } + + @Override + protected Writeable.Reader instanceReader() { + return IndexUpgradeInfoRequest::new; + } + + public void testNullIndices() { + expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest((String[])null)); + expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest().indices((String[])null)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java new file mode 100644 index 0000000000000..57f01a4454e02 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.protocol.xpack.migration;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+public class IndexUpgradeInfoResponseTests extends AbstractStreamableXContentTestCase<IndexUpgradeInfoResponse> {
+    @Override
+    protected IndexUpgradeInfoResponse doParseInstance(XContentParser parser) {
+        return IndexUpgradeInfoResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected IndexUpgradeInfoResponse createBlankInstance() {
+        return new IndexUpgradeInfoResponse();
+    }
+
+    @Override
+    protected IndexUpgradeInfoResponse createTestInstance() {
+        return randomIndexUpgradeInfoResponse(randomIntBetween(0, 10));
+    }
+
+    private static IndexUpgradeInfoResponse randomIndexUpgradeInfoResponse(int numIndices) {
+        Map<String, UpgradeActionRequired> actions = new HashMap<>();
+        for (int i = 0; i < numIndices; i++) {
+            actions.put(randomAlphaOfLength(5), randomFrom(UpgradeActionRequired.values()));
+        }
+        return new IndexUpgradeInfoResponse(actions);
+    }
+
+    @Override
+    protected IndexUpgradeInfoResponse mutateInstance(IndexUpgradeInfoResponse instance) {
+        if (instance.getActions().size() == 0) {
+            return randomIndexUpgradeInfoResponse(1);
+        }
+        Map<String, UpgradeActionRequired> actions = new HashMap<>(instance.getActions());
+        if (randomBoolean()) {
+            Iterator<Map.Entry<String, UpgradeActionRequired>> iterator = actions.entrySet().iterator();
+            iterator.next();
+            iterator.remove();
+        } else {
+            actions.put(randomAlphaOfLength(5), randomFrom(UpgradeActionRequired.values()));
+        }
+        return new IndexUpgradeInfoResponse(actions);
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/security/UserTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/security/UserTests.java
new file mode 100644
index 0000000000000..28a27e639985d
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/security/UserTests.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.protocol.xpack.security; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; + +import static org.hamcrest.Matchers.is; + +public class UserTests extends ESTestCase { + + public void testUserToString() { + User user = new User("u1", "r1"); + assertThat(user.toString(), is("User[username=u1,roles=[r1],fullName=null,email=null,metadata={}]")); + user = new User("u1", new String[] { "r1", "r2" }, "user1", "user1@domain.com", Collections.singletonMap("key", "val"), true); + assertThat(user.toString(), is("User[username=u1,roles=[r1,r2],fullName=user1,email=user1@domain.com,metadata={key=val}]")); + user = new User("u1", new String[] {"r1"}, new User("u2", "r2", "r3")); + assertThat(user.toString(), is("User[username=u1,roles=[r1],fullName=null,email=null,metadata={}," + + "authenticatedUser=[User[username=u2,roles=[r2,r3],fullName=null,email=null,metadata={}]]]")); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java new file mode 100644 index 0000000000000..209bc790a8c54 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class DeleteWatchResponseTests extends AbstractXContentTestCase { + + @Override + protected DeleteWatchResponse createTestInstance() { + String id = randomAlphaOfLength(10); + long version = randomLongBetween(1, 10); + boolean found = randomBoolean(); + return new DeleteWatchResponse(id, version, found); + } + + @Override + protected DeleteWatchResponse doParseInstance(XContentParser parser) throws IOException { + return DeleteWatchResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java new file mode 100644 index 0000000000000..1fc2f61b684c7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.protocol.xpack.watcher; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class PutWatchResponseTests extends AbstractXContentTestCase { + + @Override + protected PutWatchResponse createTestInstance() { + String id = randomAlphaOfLength(10); + long version = randomLongBetween(1, 10); + boolean created = randomBoolean(); + return new PutWatchResponse(id, version, created); + } + + @Override + protected PutWatchResponse doParseInstance(XContentParser parser) throws IOException { + return PutWatchResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java index 88d9b07816d44..7e53478533eb3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java @@ -39,7 +39,6 @@ import java.util.Map; import java.util.Set; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -479,19 +478,6 @@ public void testBuilder_givenTimeFieldInAnalysisConfig() { assertThat(e.getMessage(), equalTo(Messages.getMessage(Messages.JOB_CONFIG_TIME_FIELD_NOT_ALLOWED_IN_ANALYSIS_CONFIG))); } - public void testGetCompatibleJobTypes_givenVersionBefore_V_5_4() { - assertThat(Job.getCompatibleJobTypes(Version.V_5_0_0).isEmpty(), is(true)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_3_0).isEmpty(), is(true)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_3_2).isEmpty(), is(true)); - } - - public void testGetCompatibleJobTypes_givenVersionAfter_V_5_4() { - assertThat(Job.getCompatibleJobTypes(Version.V_5_4_0), contains(Job.ANOMALY_DETECTOR_JOB_TYPE)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_4_0).size(), equalTo(1)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_5_0), contains(Job.ANOMALY_DETECTOR_JOB_TYPE)); - assertThat(Job.getCompatibleJobTypes(Version.V_5_5_0).size(), equalTo(1)); - } - public void testInvalidCreateTimeSettings() { Job.Builder builder = new Job.Builder("invalid-settings"); builder.setModelSnapshotId("snapshot-foo"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngineTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngineTests.java new file mode 100644 index 0000000000000..0f98acefe5b7c --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngineTests.java @@ -0,0 +1,160 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+
+package org.elasticsearch.xpack.core.scheduler;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESTestCase;
+import org.mockito.ArgumentCaptor;
+
+import java.time.Clock;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.Matchers.arrayWithSize;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+
+public class SchedulerEngineTests extends ESTestCase {
+
+    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/33124")
+    public void testListenersThrowingExceptionsDoNotCauseOtherListenersToBeSkipped() throws InterruptedException {
+        final Logger mockLogger = mock(Logger.class);
+        final SchedulerEngine engine = new SchedulerEngine(Settings.EMPTY, Clock.systemUTC(), mockLogger);
+        try {
+            final List<Tuple<SchedulerEngine.Listener, AtomicBoolean>> listeners = new ArrayList<>();
+            final int numberOfListeners = randomIntBetween(1, 32);
+            int numberOfFailingListeners = 0;
+            final CountDownLatch latch = new CountDownLatch(numberOfListeners);
+            for (int i = 0; i < numberOfListeners; i++) {
+                final AtomicBoolean trigger = new AtomicBoolean();
+                final SchedulerEngine.Listener listener;
+                if (randomBoolean()) {
+                    listener = event -> {
+                        if (trigger.compareAndSet(false, true)) {
+                            latch.countDown();
+                        } else {
+                            fail("listener invoked twice");
+                        }
+                    };
+                } else {
+                    numberOfFailingListeners++;
+                    listener = event -> {
+                        if (trigger.compareAndSet(false, true)) {
+                            latch.countDown();
+                            throw new RuntimeException(getTestName());
+                        } else {
+                            fail("listener invoked twice");
+                        }
+                    };
+                }
+                listeners.add(Tuple.tuple(listener, trigger));
+            }
+
+            // randomize the order and register the listeners
+            Collections.shuffle(listeners, random());
+            listeners.stream().map(Tuple::v1).forEach(engine::register);
+
+            final AtomicBoolean scheduled = new AtomicBoolean();
+            engine.add(new SchedulerEngine.Job(
+                    getTestName(),
+                    (startTime, now) -> {
+                        // only allow one triggering of the listeners
+                        if (scheduled.compareAndSet(false, true)) {
+                            return 0;
+                        } else {
+                            return -1;
+                        }
+                    }));
+
+            latch.await();
+
+            // now check that every listener was invoked
+            assertTrue(listeners.stream().map(Tuple::v2).allMatch(AtomicBoolean::get));
+            if (numberOfFailingListeners > 0) {
+                assertFailedListenerLogMessage(mockLogger, numberOfFailingListeners);
+            }
+            verifyNoMoreInteractions(mockLogger);
+        } finally {
+            engine.stop();
+        }
+    }
+
+    public void testListenersThrowingExceptionsDoNotCauseNextScheduledTaskToBeSkipped() throws InterruptedException {
+        final Logger mockLogger = mock(Logger.class);
+        final SchedulerEngine engine = new SchedulerEngine(Settings.EMPTY, Clock.systemUTC(), mockLogger);
+        try {
+            final List<Tuple<SchedulerEngine.Listener, AtomicInteger>> listeners = new ArrayList<>();
+            final int numberOfListeners = randomIntBetween(1, 32);
+            final int numberOfSchedules = randomIntBetween(1, 32);
+            final CountDownLatch listenersLatch = new CountDownLatch(numberOfSchedules * numberOfListeners);
+            for (int i = 0; i < numberOfListeners; i++) {
+                final AtomicInteger triggerCount = new
AtomicInteger(); + final SchedulerEngine.Listener listener = event -> { + if (triggerCount.incrementAndGet() <= numberOfSchedules) { + listenersLatch.countDown(); + throw new RuntimeException(getTestName()); + } else { + fail("listener invoked more than [" + numberOfSchedules + "] times"); + } + }; + listeners.add(Tuple.tuple(listener, triggerCount)); + engine.register(listener); + } + + // latch for each invocation of nextScheduledTimeAfter, once for each scheduled run, and then a final time when we disable + final CountDownLatch latch = new CountDownLatch(1 + numberOfSchedules); + engine.add(new SchedulerEngine.Job( + getTestName(), + (startTime, now) -> { + if (latch.getCount() >= 2) { + latch.countDown(); + return 0; + } else if (latch.getCount() == 1) { + latch.countDown(); + return -1; + } else { + throw new AssertionError("nextScheduledTimeAfter invoked more than the expected number of times"); + } + })); + + listenersLatch.await(); + assertTrue(listeners.stream().map(Tuple::v2).allMatch(count -> count.get() == numberOfSchedules)); + latch.await(); + assertFailedListenerLogMessage(mockLogger, numberOfListeners * numberOfSchedules); + verifyNoMoreInteractions(mockLogger); + } finally { + engine.stop(); + } + } + + private void assertFailedListenerLogMessage(Logger mockLogger, int times) { + final ArgumentCaptor messageCaptor = ArgumentCaptor.forClass(ParameterizedMessage.class); + final ArgumentCaptor throwableCaptor = ArgumentCaptor.forClass(Throwable.class); + verify(mockLogger, times(times)).warn(messageCaptor.capture(), throwableCaptor.capture()); + for (final ParameterizedMessage message : messageCaptor.getAllValues()) { + assertThat(message.getFormat(), equalTo("listener failed while handling triggered event [{}]")); + assertThat(message.getParameters(), arrayWithSize(1)); + assertThat(message.getParameters()[0], equalTo(getTestName())); + } + for (final Throwable throwable : throwableCaptor.getAllValues()) { + assertThat(throwable, instanceOf(RuntimeException.class)); + assertThat(throwable.getMessage(), equalTo(getTestName())); + } + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java index ae458cbb2f5ed..a68a522f0242c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java @@ -58,11 +58,17 @@ public void testSerialization() throws IOException { final PutRoleRequest original = buildRandomRequest(); final BytesStreamOutput out = new BytesStreamOutput(); + if (randomBoolean()) { + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT); + logger.info("Serializing with version {}", version); + out.setVersion(version); + } original.writeTo(out); final PutRoleRequest copy = new PutRoleRequest(); final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables()); StreamInput in = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(out.bytes())), registry); + in.setVersion(out.getVersion()); copy.readFrom(in); assertThat(copy.roleDescriptor(), equalTo(original.roleDescriptor())); @@ -72,7 +78,7 @@ public void testSerializationV63AndBefore() throws IOException { final PutRoleRequest 
original = buildRandomRequest(); final BytesStreamOutput out = new BytesStreamOutput(); - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_6_0, Version.V_6_3_2); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_3_2); out.setVersion(version); original.writeTo(out); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java index f458311e68537..a6706542e9613 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; @@ -28,9 +29,10 @@ public class HasPrivilegesRequestTests extends ESTestCase { - public void testSerializationV7() throws IOException { + public void testSerializationV64OrLater() throws IOException { final HasPrivilegesRequest original = randomRequest(); - final HasPrivilegesRequest copy = serializeAndDeserialize(original, Version.V_7_0_0_alpha1); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT); + final HasPrivilegesRequest copy = serializeAndDeserialize(original, version); assertThat(copy.username(), equalTo(original.username())); assertThat(copy.clusterPrivileges(), equalTo(original.clusterPrivileges())); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponseTests.java new file mode 100644 index 0000000000000..89c58945badd0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponseTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+
+package org.elasticsearch.xpack.core.security.action.user;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.VersionUtils;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class HasPrivilegesResponseTests extends ESTestCase {
+
+    public void testSerializationV64OrLater() throws IOException {
+        final HasPrivilegesResponse original = randomResponse();
+        final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT);
+        final HasPrivilegesResponse copy = serializeAndDeserialize(original, version);
+
+        assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch()));
+//        assertThat(copy.getClusterPrivileges(), equalTo(original.getClusterPrivileges()));
+        assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges()));
+        assertThat(copy.getApplicationPrivileges(), equalTo(original.getApplicationPrivileges()));
+    }
+
+    public void testSerializationV63() throws IOException {
+        final HasPrivilegesResponse original = randomResponse();
+        final HasPrivilegesResponse copy = serializeAndDeserialize(original, Version.V_6_3_0);
+
+        assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch()));
+//        assertThat(copy.getClusterPrivileges(), equalTo(original.getClusterPrivileges()));
+        assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges()));
+        assertThat(copy.getApplicationPrivileges(), equalTo(Collections.emptyMap()));
+    }
+
+    private HasPrivilegesResponse serializeAndDeserialize(HasPrivilegesResponse original, Version version) throws IOException {
+        logger.info("Test serialize/deserialize with version {}", version);
+        final BytesStreamOutput out = new BytesStreamOutput();
+        out.setVersion(version);
+        original.writeTo(out);
+
+        final HasPrivilegesResponse copy = new HasPrivilegesResponse();
+        final StreamInput in = out.bytes().streamInput();
+        in.setVersion(version);
+        copy.readFrom(in);
+        assertThat(in.read(), equalTo(-1));
+        return copy;
+    }
+
+    private HasPrivilegesResponse randomResponse() {
+        final Map<String, Boolean> cluster = new HashMap<>();
+        for (String priv : randomArray(1, 6, String[]::new, () -> randomAlphaOfLengthBetween(3, 12))) {
+            cluster.put(priv, randomBoolean());
+        }
+        final Collection<HasPrivilegesResponse.ResourcePrivileges> index = randomResourcePrivileges();
+        final Map<String, Collection<HasPrivilegesResponse.ResourcePrivileges>> application = new HashMap<>();
+        for (String app : randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(3, 6).toLowerCase(Locale.ROOT))) {
+            application.put(app, randomResourcePrivileges());
+        }
+        return new HasPrivilegesResponse(randomBoolean(), cluster, index, application);
+    }
+
+    private Collection<HasPrivilegesResponse.ResourcePrivileges> randomResourcePrivileges() {
+        final Collection<HasPrivilegesResponse.ResourcePrivileges> list = new ArrayList<>();
+        for (String resource : randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(2, 6))) {
+            final Map<String, Boolean> privileges = new HashMap<>();
+            for (String priv : randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(3, 8))) {
+                privileges.put(priv, randomBoolean());
+            }
+            list.add(new HasPrivilegesResponse.ResourcePrivileges(resource, privileges));
+        }
+        return list;
+    }
+
+}
diff --git a/x-pack/plugin/deprecation/build.gradle b/x-pack/plugin/deprecation/build.gradle
index 3746287d615ff..d89eb62e88492
100644 --- a/x-pack/plugin/deprecation/build.gradle +++ b/x-pack/plugin/deprecation/build.gradle @@ -10,7 +10,7 @@ esplugin { archivesBaseName = 'x-pack-deprecation' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + compileOnly "org.elasticsearch.plugin:x-pack-core:${version}" } run { diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index 0f54784a33f46..d496eea2f0d13 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -7,10 +7,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction; @@ -23,153 +20,9 @@ import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; public class IndexDeprecationChecksTests extends ESTestCase { - - private static void assertSettingsAndIssue(String key, String value, DeprecationIssue expected) { - IndexMetaData indexMetaData = IndexMetaData.builder("test") - .settings(settings(Version.V_5_6_0) - .put(key, value)) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - List issues = DeprecationInfoAction.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); - assertEquals(singletonList(expected), issues); - } - - public void testCoerceBooleanDeprecation() throws IOException { - XContentBuilder mapping = XContentFactory.jsonBuilder(); - mapping.startObject(); { - mapping.startObject("properties"); { - mapping.startObject("my_boolean"); { - mapping.field("type", "boolean"); - } - mapping.endObject(); - mapping.startObject("my_object"); { - mapping.startObject("properties"); { - mapping.startObject("my_inner_boolean"); { - mapping.field("type", "boolean"); - } - mapping.endObject(); - mapping.startObject("my_text"); { - mapping.field("type", "text"); - mapping.startObject("fields"); { - mapping.startObject("raw"); { - mapping.field("type", "boolean"); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - - IndexMetaData indexMetaData = IndexMetaData.builder("test") - .putMapping("testBooleanCoercion", Strings.toString(mapping)) - .settings(settings(Version.V_5_6_0)) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - - DeprecationIssue expected = new DeprecationIssue(DeprecationIssue.Level.INFO, - "Coercion of boolean fields", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_mappings_changes.html#_coercion_of_boolean_fields", - "[[type: testBooleanCoercion, field: my_boolean], [type: testBooleanCoercion, field: my_inner_boolean]," + - " [type: testBooleanCoercion, field: my_text, multifield: raw]]"); - List issues = DeprecationInfoAction.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); - assertEquals(singletonList(expected), 
issues); - } - - public void testMatchMappingTypeCheck() throws IOException { - XContentBuilder mapping = XContentFactory.jsonBuilder(); - mapping.startObject(); { - mapping.startArray("dynamic_templates"); - { - mapping.startObject(); - { - mapping.startObject("integers"); - { - mapping.field("match_mapping_type", "UNKNOWN_VALUE"); - mapping.startObject("mapping"); - { - mapping.field("type", "integer"); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endArray(); - } - mapping.endObject(); - - IndexMetaData indexMetaData = IndexMetaData.builder("test") - .putMapping("test", Strings.toString(mapping)) - .settings(settings(Version.V_5_6_0)) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - - DeprecationIssue expected = new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "Unrecognized match_mapping_type options not silently ignored", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_mappings_changes.html#_unrecognized_literal_match_mapping_type_literal_options_not_silently_ignored", - "[type: test, dynamicFieldDefinitionintegers, unknown match_mapping_type[UNKNOWN_VALUE]]"); - List issues = DeprecationInfoAction.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); - assertEquals(singletonList(expected), issues); - } - - public void testBaseSimilarityDefinedCheck() { - assertSettingsAndIssue("index.similarity.base.type", "classic", - new DeprecationIssue(DeprecationIssue.Level.WARNING, - "The base similarity is now ignored as coords and query normalization have been removed." + - "If provided, this setting will be ignored and issue a deprecation warning", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_settings_changes.html#_similarity_settings", null)); - } - - public void testIndexStoreTypeCheck() { - assertSettingsAndIssue("index.store.type", "niofs", - new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "The default index.store.type has been removed. If you were using it, " + - "we advise that you simply remove it from your index settings and Elasticsearch" + - "will use the best store implementation for your operating system.", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_settings_changes.html#_store_settings", null)); - } - public void testStoreThrottleSettingsCheck() { - assertSettingsAndIssue("index.store.throttle.max_bytes_per_sec", "32", - new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "index.store.throttle settings are no longer recognized. these settings should be removed", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_settings_changes.html#_store_throttling_settings", - "present settings: [index.store.throttle.max_bytes_per_sec]")); - assertSettingsAndIssue("index.store.throttle.type", "none", - new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "index.store.throttle settings are no longer recognized. 
these settings should be removed", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + - "breaking_60_settings_changes.html#_store_throttling_settings", - "present settings: [index.store.throttle.type]")); - } - - public void testSharedFileSystemSettingsCheck() { - assertSettingsAndIssue("index.shared_filesystem", "true", - new DeprecationIssue(DeprecationIssue.Level.CRITICAL, - "[index.shared_filesystem] setting should be removed", - "https://www.elastic.co/guide/en/elasticsearch/reference/6.0/" + - "breaking_60_indices_changes.html#_shadow_replicas_have_been_removed", null)); - } - public void testDelimitedPayloadFilterCheck() throws IOException { Settings settings = settings( - VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1))) + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1))) .put("index.analysis.filter.my_delimited_payload_filter.type", "delimited_payload_filter") .put("index.analysis.filter.my_delimited_payload_filter.delimiter", "^") .put("index.analysis.filter.my_delimited_payload_filter.encoding", "identity").build(); @@ -183,4 +36,4 @@ public void testDelimitedPayloadFilterCheck() throws IOException { List issues = DeprecationInfoAction.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); assertEquals(singletonList(expected), issues); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/graph/build.gradle b/x-pack/plugin/graph/build.gradle index 2b0f592b72040..069bfa5fbbe2b 100644 --- a/x-pack/plugin/graph/build.gradle +++ b/x-pack/plugin/graph/build.gradle @@ -10,7 +10,8 @@ esplugin { archivesBaseName = 'x-pack-graph' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java index 4eb136040e988..25f2511fbc0a0 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java @@ -24,6 +24,15 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.protocol.xpack.graph.Connection; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.protocol.xpack.graph.Vertex; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder; 
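An aside on the hunk above: this change moves the Graph protocol classes (GraphExploreRequest, GraphExploreResponse, Hop, Vertex, VertexRequest and their inner types) out of org.elasticsearch.xpack.core.graph.action and into org.elasticsearch.protocol.xpack.graph. For orientation, a minimal sketch of building a request against the relocated classes; the index name "social", the field "people", and the guiding term query are invented for illustration, and the builder calls are assumptions about the relocated API rather than part of this patch:

    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;
    import org.elasticsearch.protocol.xpack.graph.Hop;
    import org.elasticsearch.protocol.xpack.graph.VertexRequest;

    public class GraphExploreRequestSketch {
        public static GraphExploreRequest newExploreRequest() {
            GraphExploreRequest request = new GraphExploreRequest("social"); // hypothetical index
            Hop hop = request.createNextHop(QueryBuilders.termQuery("topic", "elasticsearch"));
            VertexRequest people = hop.addVertexRequest("people"); // hypothetical field to walk
            people.minDocCount(3); // require a few supporting documents per vertex
            return request;
        }
    }

Since only the package changed, callers such as TransportGraphExploreAction below need nothing beyond rewritten imports, as the following hunks show.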
@@ -39,16 +48,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.graph.action.Connection; -import org.elasticsearch.xpack.core.graph.action.Connection.ConnectionId; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest.TermBoost; -import org.elasticsearch.xpack.core.graph.action.GraphExploreResponse; -import org.elasticsearch.xpack.core.graph.action.Hop; -import org.elasticsearch.xpack.core.graph.action.Vertex; -import org.elasticsearch.xpack.core.graph.action.Vertex.VertexId; -import org.elasticsearch.xpack.core.graph.action.VertexRequest; import java.util.ArrayList; import java.util.HashMap; diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java index 3f11d0c72bd44..778eb261a0705 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java @@ -12,14 +12,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest.TermBoost; -import org.elasticsearch.xpack.core.graph.action.Hop; -import org.elasticsearch.xpack.core.graph.action.VertexRequest; import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; diff --git a/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/test/GraphTests.java b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/test/GraphTests.java index 5bebef3d2d48c..a58d8e8a8b0c6 100644 --- a/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/test/GraphTests.java +++ b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/test/GraphTests.java @@ -17,6 +17,11 @@ import org.elasticsearch.index.query.ScriptQueryBuilder; import org.elasticsearch.license.LicenseService; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.protocol.xpack.graph.Vertex; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -24,12 +29,7 @@ import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.graph.Graph; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; -import 
org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; import org.elasticsearch.xpack.core.graph.action.GraphExploreRequestBuilder; -import org.elasticsearch.xpack.core.graph.action.GraphExploreResponse; -import org.elasticsearch.xpack.core.graph.action.Hop; -import org.elasticsearch.xpack.core.graph.action.Vertex; -import org.elasticsearch.xpack.core.graph.action.VertexRequest; import java.util.Collection; import java.util.Collections; diff --git a/x-pack/plugin/logstash/build.gradle b/x-pack/plugin/logstash/build.gradle index 2e158a90ac7ab..1057a1c8526fc 100644 --- a/x-pack/plugin/logstash/build.gradle +++ b/x-pack/plugin/logstash/build.gradle @@ -10,9 +10,9 @@ esplugin { archivesBaseName = 'x-pack-logstash' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') - } run { diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index 3602e1b359ec2..7c3594a06cfdd 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -40,7 +40,8 @@ compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try, compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // This should not be here testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') @@ -103,9 +104,19 @@ task internalClusterTest(type: RandomizedTestingTask, include '**/*IT.class' systemProperty 'es.set.netty.runtime.available.processors', 'false' } + check.dependsOn internalClusterTest internalClusterTest.mustRunAfter test +// add all sub-projects of the qa sub-project +gradle.projectsEvaluated { + project.subprojects + .find { it.path == project.path + ":qa" } + .subprojects + .findAll { it.path.startsWith(project.path + ":qa") } + .each { check.dependsOn it.check } +} + // also add an "alias" task to make typing on the command line easier task icTest { dependsOn internalClusterTest diff --git a/x-pack/plugin/ml/log-structure-finder/build.gradle b/x-pack/plugin/ml/log-structure-finder/build.gradle index 9048a1c46860c..f5dff6dc8464d 100644 --- a/x-pack/plugin/ml/log-structure-finder/build.gradle +++ b/x-pack/plugin/ml/log-structure-finder/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' archivesBaseName = 'x-pack-log-structure-finder' @@ -31,6 +29,6 @@ artifacts { forbiddenApisMain { // log-structure-finder does not depend on server, so cannot forbid server methods - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } diff --git a/x-pack/qa/ml-basic-multi-node/build.gradle b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle similarity index 85% rename from x-pack/qa/ml-basic-multi-node/build.gradle rename to x-pack/plugin/ml/qa/basic-multi-node/build.gradle index 3df77aadccbd5..cc5a2cd68dde5 100644 --- 
a/x-pack/qa/ml-basic-multi-node/build.gradle
+++ b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle
@@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
   testCompile project(path: xpackModule('ml'), configuration: 'runtime')
 }
diff --git a/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java b/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java
new file mode 100644
index 0000000000000..6e22e5b3f1879
--- /dev/null
+++ b/x-pack/plugin/ml/qa/basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java
@@ -0,0 +1,322 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ml.integration;
+
+import org.apache.http.entity.ContentType;
+import org.apache.http.nio.entity.NStringEntity;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.ResponseException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.elasticsearch.xpack.ml.MachineLearning;
+
+import java.io.IOException;
+import java.net.URLEncoder;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+public class MlBasicMultiNodeIT extends ESRestTestCase {
+
+    public void testMachineLearningInstalled() throws Exception {
+        Response response = client().performRequest(new Request("GET", "/_xpack"));
+        Map features = (Map) entityAsMap(response).get("features");
+        Map ml = (Map) features.get("ml");
+        assertNotNull(ml);
+        assertTrue((Boolean) ml.get("available"));
+        assertTrue((Boolean) ml.get("enabled"));
+    }
+
+    public void testInvalidJob() throws Exception {
+        // The job name is invalid because it contains a space
+        String jobId = "invalid job";
+        ResponseException e = expectThrows(ResponseException.class, () -> createFarequoteJob(jobId));
+        assertTrue(e.getMessage(), e.getMessage().contains("can contain lowercase alphanumeric (a-z and 0-9), hyphens or underscores"));
+        // If validation of the invalid job is not done until after transportation to the master node then the
+        // root cause gets reported as a remote_transport_exception. The code in PutJobAction is supposed to
+        // validate before transportation to avoid this. This test must be done in a multi-node cluster to have
+        // a chance of catching a problem, hence it is here rather than in the single node integration tests.
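// Aside, for illustration only (not part of this patch): the assertion above
// encodes the job-id naming rule. A rough client-side approximation of that
// rule is sketched below; the authoritative check is the server-side
// validation this test exercises, and the detail that an id must start and
// end with an alphanumeric character is an assumption here.
//
//     private static final Pattern VALID_JOB_ID =
//             Pattern.compile("[a-z0-9](?:[a-z0-9_-]*[a-z0-9])?");
//
//     private static boolean isValidJobId(String jobId) {
//         return VALID_JOB_ID.matcher(jobId).matches();
//     }
//
// Under that pattern isValidJobId("invalid job") is false because of the
// space, while ids like "mini-farequote-job" used elsewhere in this test
// class match.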
+ assertFalse(e.getMessage(), e.getMessage().contains("remote_transport_exception")); + } + + public void testMiniFarequote() throws Exception { + String jobId = "mini-farequote-job"; + createFarequoteJob(jobId); + + Response openResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open")); + assertEquals(Collections.singletonMap("opened", true), entityAsMap(openResponse)); + + Request addData = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data"); + addData.setEntity(new NStringEntity( + "{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + + "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}", + randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); + Response addDataResponse = client().performRequest(addData); + assertEquals(202, addDataResponse.getStatusLine().getStatusCode()); + Map responseBody = entityAsMap(addDataResponse); + assertEquals(2, responseBody.get("processed_record_count")); + assertEquals(4, responseBody.get("processed_field_count")); + assertEquals(177, responseBody.get("input_bytes")); + assertEquals(6, responseBody.get("input_field_count")); + assertEquals(0, responseBody.get("invalid_date_count")); + assertEquals(0, responseBody.get("missing_field_count")); + assertEquals(0, responseBody.get("out_of_order_timestamp_count")); + assertEquals(0, responseBody.get("bucket_count")); + assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); + assertEquals(1403481700000L, responseBody.get("latest_record_timestamp")); + + Response flushResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush")); + assertFlushResponse(flushResponse, true, 1403481600000L); + + Request closeRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close"); + closeRequest.addParameter("timeout", "20s"); + Response closeResponse = client().performRequest(closeRequest); + assertEquals(Collections.singletonMap("closed", true), entityAsMap(closeResponse)); + + Response statsResponse = client().performRequest( + new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + Map dataCountsDoc = (Map) + ((Map)((List) entityAsMap(statsResponse).get("jobs")).get(0)).get("data_counts"); + assertEquals(2, dataCountsDoc.get("processed_record_count")); + assertEquals(4, dataCountsDoc.get("processed_field_count")); + assertEquals(177, dataCountsDoc.get("input_bytes")); + assertEquals(6, dataCountsDoc.get("input_field_count")); + assertEquals(0, dataCountsDoc.get("invalid_date_count")); + assertEquals(0, dataCountsDoc.get("missing_field_count")); + assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count")); + assertEquals(0, dataCountsDoc.get("bucket_count")); + assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp")); + assertEquals(1403481700000L, dataCountsDoc.get("latest_record_timestamp")); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + } + + public void testMiniFarequoteWithDatafeeder() throws Exception { + Request createAirlineDataRequest = new Request("PUT", "/airline-data"); + createAirlineDataRequest.setJsonEntity("{" + + " \"mappings\": {" + + " \"response\": {" + + " \"properties\": {" + + " \"time\": { 
\"type\":\"date\"}," + + " \"airline\": { \"type\":\"keyword\"}," + + " \"responsetime\": { \"type\":\"float\"}" + + " }" + + " }" + + " }" + + "}"); + client().performRequest(createAirlineDataRequest); + Request airlineData1 = new Request("PUT", "/airline-data/response/1"); + airlineData1.setJsonEntity("{\"time\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}"); + client().performRequest(airlineData1); + Request airlineData2 = new Request("PUT", "/airline-data/response/2"); + airlineData2.setJsonEntity("{\"time\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}"); + client().performRequest(airlineData2); + + // Ensure all data is searchable + client().performRequest(new Request("POST", "/_refresh")); + + String jobId = "mini-farequote-with-data-feeder-job"; + createFarequoteJob(jobId); + String datafeedId = "bar"; + createDatafeed(datafeedId, jobId); + + Response openResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open")); + assertEquals(Collections.singletonMap("opened", true), entityAsMap(openResponse)); + + Request startRequest = new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start"); + startRequest.addParameter("start", "0"); + Response startResponse = client().performRequest(startRequest); + assertEquals(Collections.singletonMap("started", true), entityAsMap(startResponse)); + + assertBusy(() -> { + try { + Response statsResponse = client().performRequest( + new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + Map dataCountsDoc = (Map) + ((Map)((List) entityAsMap(statsResponse).get("jobs")).get(0)).get("data_counts"); + assertEquals(2, dataCountsDoc.get("input_record_count")); + assertEquals(2, dataCountsDoc.get("processed_record_count")); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + Response stopResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stop")); + assertEquals(Collections.singletonMap("stopped", true), entityAsMap(stopResponse)); + + Request closeRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close"); + closeRequest.addParameter("timeout", "20s"); + assertEquals(Collections.singletonMap("closed", true), + entityAsMap(client().performRequest(closeRequest))); + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId)); + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + } + + public void testMiniFarequoteReopen() throws Exception { + String jobId = "mini-farequote-reopen"; + createFarequoteJob(jobId); + + Response openResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open")); + assertEquals(Collections.singletonMap("opened", true), entityAsMap(openResponse)); + + Request addDataRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data"); + addDataRequest.setEntity(new NStringEntity( + "{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + + "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}\n" + + "{\"airline\":\"JBU\",\"responsetime\":\"877.5927\",\"sourcetype\":\"farequote\",\"time\":\"1403481800\"}\n" + + 
"{\"airline\":\"KLM\",\"responsetime\":\"1355.4812\",\"sourcetype\":\"farequote\",\"time\":\"1403481900\"}\n" + + "{\"airline\":\"NKS\",\"responsetime\":\"9991.3981\",\"sourcetype\":\"farequote\",\"time\":\"1403482000\"}", + randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); + Response addDataResponse = client().performRequest(addDataRequest); + assertEquals(202, addDataResponse.getStatusLine().getStatusCode()); + Map responseBody = entityAsMap(addDataResponse); + assertEquals(5, responseBody.get("processed_record_count")); + assertEquals(10, responseBody.get("processed_field_count")); + assertEquals(446, responseBody.get("input_bytes")); + assertEquals(15, responseBody.get("input_field_count")); + assertEquals(0, responseBody.get("invalid_date_count")); + assertEquals(0, responseBody.get("missing_field_count")); + assertEquals(0, responseBody.get("out_of_order_timestamp_count")); + assertEquals(0, responseBody.get("bucket_count")); + assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); + assertEquals(1403482000000L, responseBody.get("latest_record_timestamp")); + + Response flushResponse = client().performRequest( + new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush")); + assertFlushResponse(flushResponse, true, 1403481600000L); + + Request closeRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close"); + closeRequest.addParameter("timeout", "20s"); + assertEquals(Collections.singletonMap("closed", true), + entityAsMap(client().performRequest(closeRequest))); + + Request statsRequest = new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + client().performRequest(statsRequest); + + Request openRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); + openRequest.addParameter("timeout", "20s"); + Response openResponse2 = client().performRequest(openRequest); + assertEquals(Collections.singletonMap("opened", true), entityAsMap(openResponse2)); + + // feed some more data points + Request addDataRequest2 = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data"); + addDataRequest2.setEntity(new NStringEntity( + "{\"airline\":\"AAL\",\"responsetime\":\"136.2361\",\"sourcetype\":\"farequote\",\"time\":\"1407081600\"}\n" + + "{\"airline\":\"VRD\",\"responsetime\":\"282.9847\",\"sourcetype\":\"farequote\",\"time\":\"1407081700\"}\n" + + "{\"airline\":\"JAL\",\"responsetime\":\"493.0338\",\"sourcetype\":\"farequote\",\"time\":\"1407081800\"}\n" + + "{\"airline\":\"UAL\",\"responsetime\":\"8.4275\",\"sourcetype\":\"farequote\",\"time\":\"1407081900\"}\n" + + "{\"airline\":\"FFT\",\"responsetime\":\"221.8693\",\"sourcetype\":\"farequote\",\"time\":\"1407082000\"}", + randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); + Response addDataResponse2 = client().performRequest(addDataRequest2); + assertEquals(202, addDataResponse2.getStatusLine().getStatusCode()); + Map responseBody2 = entityAsMap(addDataResponse2); + assertEquals(5, responseBody2.get("processed_record_count")); + assertEquals(10, responseBody2.get("processed_field_count")); + assertEquals(442, responseBody2.get("input_bytes")); + assertEquals(15, responseBody2.get("input_field_count")); + assertEquals(0, responseBody2.get("invalid_date_count")); + assertEquals(0, responseBody2.get("missing_field_count")); + assertEquals(0, 
responseBody2.get("out_of_order_timestamp_count"));
+        assertEquals(1000, responseBody2.get("bucket_count"));
+
+        // unintuitive: should return the earliest record timestamp of this feed???
+        assertEquals(null, responseBody2.get("earliest_record_timestamp"));
+        assertEquals(1407082000000L, responseBody2.get("latest_record_timestamp"));
+
+        assertEquals(Collections.singletonMap("closed", true),
+                entityAsMap(client().performRequest(closeRequest)));
+
+        // counts should be summed up
+        Response statsResponse = client().performRequest(statsRequest);
+
+        Map dataCountsDoc = (Map)
+                ((Map)((List) entityAsMap(statsResponse).get("jobs")).get(0)).get("data_counts");
+        assertEquals(10, dataCountsDoc.get("processed_record_count"));
+        assertEquals(20, dataCountsDoc.get("processed_field_count"));
+        assertEquals(888, dataCountsDoc.get("input_bytes"));
+        assertEquals(30, dataCountsDoc.get("input_field_count"));
+        assertEquals(0, dataCountsDoc.get("invalid_date_count"));
+        assertEquals(0, dataCountsDoc.get("missing_field_count"));
+        assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count"));
+        assertEquals(1000, dataCountsDoc.get("bucket_count"));
+        assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp"));
+        assertEquals(1407082000000L, dataCountsDoc.get("latest_record_timestamp"));
+
+        client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId));
+    }
+
+    private Response createDatafeed(String datafeedId, String jobId) throws Exception {
+        XContentBuilder xContentBuilder = jsonBuilder();
+        xContentBuilder.startObject();
+        xContentBuilder.field("job_id", jobId);
+        xContentBuilder.array("indexes", "airline-data");
+        xContentBuilder.array("types", "response");
+        xContentBuilder.field("_source", true);
+        xContentBuilder.endObject();
+        Request request = new Request("PUT", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId);
+        request.setJsonEntity(Strings.toString(xContentBuilder));
+        return client().performRequest(request);
+    }
+
+    private Response createFarequoteJob(String jobId) throws Exception {
+        XContentBuilder xContentBuilder = jsonBuilder();
+        xContentBuilder.startObject();
+        {
+            xContentBuilder.field("job_id", jobId);
+            xContentBuilder.field("description", "Analysis of response time by airline");
+
+            xContentBuilder.startObject("analysis_config");
+            {
+                xContentBuilder.field("bucket_span", "3600s");
+                xContentBuilder.startArray("detectors");
+                {
+                    xContentBuilder.startObject();
+                    {
+                        xContentBuilder.field("function", "metric");
+                        xContentBuilder.field("field_name", "responsetime");
+                        xContentBuilder.field("by_field_name", "airline");
+                    }
+                    xContentBuilder.endObject();
+                }
+                xContentBuilder.endArray();
+            }
+            xContentBuilder.endObject();
+
+            xContentBuilder.startObject("data_description");
+            {
+                xContentBuilder.field("format", "xcontent");
+                xContentBuilder.field("time_field", "time");
+                xContentBuilder.field("time_format", "epoch");
+            }
+            xContentBuilder.endObject();
+        }
+        xContentBuilder.endObject();
+
+        Request request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + URLEncoder.encode(jobId, "UTF-8"));
+        request.setJsonEntity(Strings.toString(xContentBuilder));
+        return client().performRequest(request);
+    }
+
+    private static void assertFlushResponse(Response response, boolean expectedFlushed, long expectedLastFinalizedBucketEnd)
+            throws IOException {
+        Map asMap = entityAsMap(response);
+        assertThat(asMap.size(), equalTo(2));
+        assertThat(asMap.get("flushed"), is(expectedFlushed));
+        assertThat(asMap.get("last_finalized_bucket_end"), equalTo(expectedLastFinalizedBucketEnd));
+    }
+}
diff --git a/x-pack/plugin/ml/qa/build.gradle b/x-pack/plugin/ml/qa/build.gradle
new file mode 100644
index 0000000000000..517c93cc17862
--- /dev/null
+++ b/x-pack/plugin/ml/qa/build.gradle
@@ -0,0 +1,31 @@
+import org.elasticsearch.gradle.test.RestIntegTestTask
+
+subprojects {
+  // HACK: please fix this
+  // we want to add the rest api specs for xpack to qa tests, but we
+  // need to wait until after the project is evaluated to only apply
+  // to those that run rest tests. this used to be done automatically
+  // when xpack was a plugin, but now that xpack is a module there is
+  // no place for it. instead, we should package these and make them
+  // easy to use for rest tests, but currently, they must be copied
+  // into the resources of the test runner.
+  project.tasks.withType(RestIntegTestTask) {
+    File xpackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources')
+    project.copyRestSpec.from(xpackResources) {
+      include 'rest-api-spec/api/**'
+    }
+  }
+}
+
+gradle.projectsEvaluated {
+  subprojects {
+    Task assemble = project.tasks.findByName('assemble')
+    if (assemble) {
+      project.tasks.remove(assemble)
+      project.build.dependsOn.remove('assemble')
+    }
+    Task dependenciesInfo = project.tasks.findByName('dependenciesInfo')
+    if (dependenciesInfo) {
+      project.precommit.dependsOn.remove('dependenciesInfo')
+    }
+  }
+}
diff --git a/x-pack/qa/ml-disabled/build.gradle b/x-pack/plugin/ml/qa/disabled/build.gradle
similarity index 80%
rename from x-pack/qa/ml-disabled/build.gradle
rename to x-pack/plugin/ml/qa/disabled/build.gradle
index e914def3507cd..a24036651d504 100644
--- a/x-pack/qa/ml-disabled/build.gradle
+++ b/x-pack/plugin/ml/qa/disabled/build.gradle
@@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
   testCompile project(path: xpackModule('ml'), configuration: 'runtime')
 }
diff --git a/x-pack/plugin/ml/qa/disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java b/x-pack/plugin/ml/qa/disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java
new file mode 100644
index 0000000000000..170b4f14486bc
--- /dev/null
+++ b/x-pack/plugin/ml/qa/disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.ml.MachineLearning; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; + +public class MlPluginDisabledIT extends ESRestTestCase { + + /** + * Check that when the ml plugin is disabled, you cannot create a job as the + * rest handler is not registered + */ + public void testActionsFail() throws Exception { + XContentBuilder xContentBuilder = jsonBuilder(); + xContentBuilder.startObject(); + { + xContentBuilder.field("actions-fail-job", "foo"); + xContentBuilder.field("description", "Analysis of response time by airline"); + + xContentBuilder.startObject("analysis_config"); + { + xContentBuilder.field("bucket_span", "3600s"); + xContentBuilder.startArray("detectors"); + { + xContentBuilder.startObject(); + { + xContentBuilder.field("function", "metric"); + xContentBuilder.field("field_name", "responsetime"); + xContentBuilder.field("by_field_name", "airline"); + } + xContentBuilder.endObject(); + } + xContentBuilder.endArray(); + } + xContentBuilder.endObject(); + + xContentBuilder.startObject("data_description"); + { + xContentBuilder.field("format", "xcontent"); + xContentBuilder.field("time_field", "time"); + xContentBuilder.field("time_format", "epoch"); + } + xContentBuilder.endObject(); + } + xContentBuilder.endObject(); + + Request request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/foo"); + request.setJsonEntity(Strings.toString(xContentBuilder)); + ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(exception.getMessage(), containsString("no handler found for uri [/_xpack/ml/anomaly_detectors/foo] and method [PUT]")); + } +} diff --git a/x-pack/qa/smoke-test-ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle similarity index 98% rename from x-pack/qa/smoke-test-ml-with-security/build.gradle rename to x-pack/plugin/ml/qa/ml-with-security/build.gradle index 84c23add25411..a702973fcb02d 100644 --- a/x-pack/qa/smoke-test-ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -2,7 +2,8 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') } diff --git a/x-pack/qa/smoke-test-ml-with-security/roles.yml b/x-pack/plugin/ml/qa/ml-with-security/roles.yml similarity index 100% rename from x-pack/qa/smoke-test-ml-with-security/roles.yml rename to x-pack/plugin/ml/qa/ml-with-security/roles.yml diff --git a/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java similarity index 100% rename from 
x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java rename to x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java diff --git a/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java similarity index 100% rename from x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java rename to x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java diff --git a/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java similarity index 100% rename from x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java rename to x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/build.gradle b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle similarity index 95% rename from x-pack/qa/ml-native-multi-node-tests/build.gradle rename to x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle index b1893b20c465b..0c4304b123ea9 100644 --- a/x-pack/qa/ml-native-multi-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle @@ -4,7 +4,8 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('ml'), configuration: 'runtime') testCompile project(path: xpackModule('ml'), configuration: 'testArtifacts') diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java 
b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java rename to 
x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/OverallBucketsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/OverallBucketsIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/OverallBucketsIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/OverallBucketsIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ReopenJobResetsFinishedTimeIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ReopenJobResetsFinishedTimeIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ReopenJobResetsFinishedTimeIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ReopenJobResetsFinishedTimeIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ReopenJobWithGapIT.java 
b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ReopenJobWithGapIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ReopenJobWithGapIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ReopenJobWithGapIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RestoreModelSnapshotIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RestoreModelSnapshotIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RestoreModelSnapshotIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RestoreModelSnapshotIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RevertModelSnapshotIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RevertModelSnapshotIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RevertModelSnapshotIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RevertModelSnapshotIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java similarity index 100% rename from x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java diff --git a/x-pack/qa/ml-no-bootstrap-tests/build.gradle b/x-pack/plugin/ml/qa/no-bootstrap-tests/build.gradle similarity index 64% rename from x-pack/qa/ml-no-bootstrap-tests/build.gradle rename to x-pack/plugin/ml/qa/no-bootstrap-tests/build.gradle index 7e252afa3022e..9eac3fdd37a80 100644 --- a/x-pack/qa/ml-no-bootstrap-tests/build.gradle +++ b/x-pack/plugin/ml/qa/no-bootstrap-tests/build.gradle @@ -1,6 +1,6 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('ml'), configuration: 'runtime') } diff --git a/x-pack/qa/ml-no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java b/x-pack/plugin/ml/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java similarity index 100% rename from 
x-pack/qa/ml-no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java rename to x-pack/plugin/ml/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java diff --git a/x-pack/qa/ml-single-node-tests/build.gradle b/x-pack/plugin/ml/qa/single-node-tests/build.gradle similarity index 80% rename from x-pack/qa/ml-single-node-tests/build.gradle rename to x-pack/plugin/ml/qa/single-node-tests/build.gradle index b62e37894b3c3..88ca4dd118ea4 100644 --- a/x-pack/qa/ml-single-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/single-node-tests/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('ml'), configuration: 'runtime') } diff --git a/x-pack/qa/ml-single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java b/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java similarity index 100% rename from x-pack/qa/ml-single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java rename to x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java index 3ca3c3154506a..252cf97d0c519 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.ml.action; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; @@ -53,11 +51,6 @@ protected void doExecute(Task task, IsolateDatafeedAction.Request request, Actio String executorNode = datafeedTask.getExecutorNode(); DiscoveryNodes nodes = state.nodes(); - if (nodes.resolveNode(executorNode).getVersion().before(Version.V_5_5_0)) { - listener.onFailure(new ElasticsearchException("Force delete datafeed is not supported because the datafeed task " + - "is running on a node [" + executorNode + "] with a version prior to " + Version.V_5_5_0)); - return; - } request.setNodes(datafeedTask.getExecutorNode()); super.doExecute(task, request, listener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java index b40f0368a1554..a9b43c3bcc47d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.ml.action; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -73,12 +71,6 @@ protected void doExecute(Task task, KillProcessAction.Request request, ActionLis return; } - Version nodeVersion = executorNode.getVersion(); - if (nodeVersion.before(Version.V_5_5_0)) { - listener.onFailure(new ElasticsearchException("Cannot kill the process on node with version " + nodeVersion)); - return; - } - super.doExecute(task, request, listener); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 56d03dd1aacc6..512d8188abfac 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -179,14 +179,6 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j continue; } - if (nodeSupportsJobVersion(node.getVersion()) == false) { - String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndVersion(node) - + "], because this node does not support jobs of version [" + job.getJobVersion() + "]"; - logger.trace(reason); - reasons.add(reason); - continue; - } - if (nodeSupportsModelSnapshotVersion(node, job) == false) { String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndVersion(node) + "], because the job's model snapshot requires a node of version [" @@ -385,10 +377,6 @@ static List verifyIndicesPrimaryShardsAreActive(String jobId, ClusterSta return unavailableIndices; } - private static boolean nodeSupportsJobVersion(Version nodeVersion) { - return nodeVersion.onOrAfter(Version.V_5_5_0); - } - private static boolean nodeSupportsModelSnapshotVersion(DiscoveryNode node, Job job) { if (job.getModelSnapshotId() == null || job.getModelSnapshotMinVersion() == null) { // There is no snapshot to restore or the min model snapshot version is 5.5.0 diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestCloseJobAction.java index fc0638048ce98..ae6257d53850f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestCloseJobAction.java @@ -12,10 +12,10 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.CloseJobAction.Request; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.ml.MachineLearning; import java.io.IOException; @@ -34,16 +34,21 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - Request request = new Request(restRequest.param(Job.ID.getPreferredName())); - if (restRequest.hasParam(Request.TIMEOUT.getPreferredName())) { - request.setCloseTimeout(TimeValue.parseTimeValue( + Request request; + if (restRequest.hasContentOrSourceParam()) { + request = Request.parseRequest(restRequest.param(Job.ID.getPreferredName()), restRequest.contentParser()); + } else { + request = new 
Request(restRequest.param(Job.ID.getPreferredName())); + if (restRequest.hasParam(Request.TIMEOUT.getPreferredName())) { + request.setCloseTimeout(TimeValue.parseTimeValue( restRequest.param(Request.TIMEOUT.getPreferredName()), Request.TIMEOUT.getPreferredName())); - } - if (restRequest.hasParam(Request.FORCE.getPreferredName())) { - request.setForce(restRequest.paramAsBoolean(Request.FORCE.getPreferredName(), request.isForce())); - } - if (restRequest.hasParam(Request.ALLOW_NO_JOBS.getPreferredName())) { - request.setAllowNoJobs(restRequest.paramAsBoolean(Request.ALLOW_NO_JOBS.getPreferredName(), request.allowNoJobs())); + } + if (restRequest.hasParam(Request.FORCE.getPreferredName())) { + request.setForce(restRequest.paramAsBoolean(Request.FORCE.getPreferredName(), request.isForce())); + } + if (restRequest.hasParam(Request.ALLOW_NO_JOBS.getPreferredName())) { + request.setAllowNoJobs(restRequest.paramAsBoolean(Request.ALLOW_NO_JOBS.getPreferredName(), request.allowNoJobs())); + } } return channel -> client.execute(CloseJobAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 02bfb1b326fd9..5bf8fb6956bfe 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -423,33 +423,6 @@ public void testSelectLeastLoadedMlNode_noCompatibleJobTypeNodes() { assertNull(result.getExecutorNode()); } - public void testSelectLeastLoadedMlNode_noNodesPriorTo_V_5_5() { - Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); - DiscoveryNodes nodes = DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), Version.V_5_4_0)) - .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), Version.V_5_4_0)) - .build(); - - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("incompatible_type_job", "_node_id1", null, tasksBuilder); - PersistentTasksCustomMetaData tasks = tasksBuilder.build(); - - ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); - MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addJobAndIndices(metaData, routingTable, "incompatible_type_job"); - cs.nodes(nodes); - metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); - cs.metaData(metaData); - cs.routingTable(routingTable.build()); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("incompatible_type_job", cs.build(), 2, 10, 30, logger); - assertThat(result.getExplanation(), containsString("because this node does not support jobs of version [" + Version.CURRENT + "]")); - assertNull(result.getExecutorNode()); - } - public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() { Map nodeAttr = new HashMap<>(); nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); @@ -606,12 +579,6 @@ public void testMappingRequiresUpdateMaliciousMappingVersion() throws IOExceptio assertArrayEquals(indices, 
TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); } - public void testMappingRequiresUpdateOldMappingVersion() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_54", Version.V_5_4_0.toString())); - String[] indices = new String[] { "version_54" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - public void testMappingRequiresUpdateBogusMappingVersion() throws IOException { ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_bogus", "0.0")); String[] indices = new String[] { "version_bogus" }; @@ -632,21 +599,6 @@ public void testMappingRequiresUpdateNewerMappingVersionMinor() throws IOExcepti TransportOpenJobAction.mappingRequiresUpdate(cs, indices, VersionUtils.getPreviousMinorVersion(), logger)); } - public void testMappingRequiresUpdateSomeVersionMix() throws IOException { - Map versionMix = new HashMap<>(); - versionMix.put("version_54", Version.V_5_4_0); - versionMix.put("version_current", Version.CURRENT); - versionMix.put("version_null", null); - versionMix.put("version_current2", Version.CURRENT); - versionMix.put("version_bogus", "0.0.0"); - versionMix.put("version_current3", Version.CURRENT); - versionMix.put("version_bogus2", "0.0.0"); - - ClusterState cs = getClusterStateWithMappingsWithMetaData(versionMix); - String[] indices = new String[] { "version_54", "version_null", "version_bogus", "version_bogus2" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - public void testNodeNameAndVersion() { TransportAddress ta = new TransportAddress(InetAddress.getLoopbackAddress(), 9300); Map attributes = new HashMap<>(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedStateTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedStateTests.java index 8b3e68b1e5714..32699f60cbdb9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedStateTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedStateTests.java @@ -5,19 +5,8 @@ */ package org.elasticsearch.xpack.ml.datafeed; -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.mockito.ArgumentCaptor; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - -import java.io.IOException; - -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class DatafeedStateTests extends ESTestCase { @@ -37,35 +26,4 @@ public void testValidOrdinals() { assertEquals(2, DatafeedState.STARTING.ordinal()); assertEquals(3, DatafeedState.STOPPING.ordinal()); } - - @SuppressWarnings("unchecked") - public void testStreaming_v54BackwardsCompatibility() throws IOException { - StreamOutput out = mock(StreamOutput.class); - when(out.getVersion()).thenReturn(Version.V_5_4_0); - ArgumentCaptor enumCaptor = ArgumentCaptor.forClass(Enum.class); - - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) { - return null; - } - }).when(out).writeEnum(enumCaptor.capture()); - - // STARTING & STOPPING states were introduced in v5.5. 
- // Pre v5.5 STARTING translated as STOPPED - DatafeedState.STARTING.writeTo(out); - assertEquals(DatafeedState.STOPPED, enumCaptor.getValue()); - - // Pre v5.5 STOPPING means the datafeed is STARTED - DatafeedState.STOPPING.writeTo(out); - assertEquals(DatafeedState.STARTED, enumCaptor.getValue()); - - // POST 5.5 enums a written as is - when(out.getVersion()).thenReturn(Version.V_5_5_0); - - DatafeedState.STARTING.writeTo(out); - assertEquals(DatafeedState.STARTING, enumCaptor.getValue()); - DatafeedState.STOPPING.writeTo(out); - assertEquals(DatafeedState.STOPPING, enumCaptor.getValue()); - } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobStateTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobStateTests.java index cd983c6b0302b..2e324b6a1c201 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobStateTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobStateTests.java @@ -5,19 +5,8 @@ */ package org.elasticsearch.xpack.ml.job.config; -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.mockito.ArgumentCaptor; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - -import java.io.IOException; - -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class JobStateTests extends ESTestCase { @@ -60,35 +49,4 @@ public void testIsAnyOf() { assertTrue(JobState.CLOSED.isAnyOf(JobState.CLOSED)); assertTrue(JobState.CLOSING.isAnyOf(JobState.CLOSING)); } - - @SuppressWarnings("unchecked") - public void testStreaming_v54BackwardsCompatibility() throws IOException { - StreamOutput out = mock(StreamOutput.class); - when(out.getVersion()).thenReturn(Version.V_5_4_0); - ArgumentCaptor enumCaptor = ArgumentCaptor.forClass(Enum.class); - - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) { - return null; - } - }).when(out).writeEnum(enumCaptor.capture()); - - // OPENING state was introduced in v5.5. 
- // Pre v5.5 its translated as CLOSED - JobState.OPENING.writeTo(out); - assertEquals(JobState.CLOSED, enumCaptor.getValue()); - - when(out.getVersion()).thenReturn(Version.V_5_5_0); - - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) { - return null; - } - }).when(out).writeEnum(enumCaptor.capture()); - - JobState.OPENING.writeTo(out); - assertEquals(JobState.OPENING, enumCaptor.getValue()); - } } diff --git a/x-pack/plugin/monitoring/build.gradle b/x-pack/plugin/monitoring/build.gradle index a452ef09a20ff..e551d577b7bbd 100644 --- a/x-pack/plugin/monitoring/build.gradle +++ b/x-pack/plugin/monitoring/build.gradle @@ -13,7 +13,8 @@ esplugin { archivesBaseName = 'x-pack-monitoring' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // monitoring deps diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java index 57106363bc199..dc294ef53de52 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkDocTests.java @@ -5,12 +5,10 @@ */ package org.elasticsearch.xpack.monitoring.action; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; @@ -21,7 +19,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Base64; import java.util.List; import static java.util.Collections.emptyList; @@ -158,23 +155,6 @@ public void testSerialization() throws IOException { } } - public void testSerializationBwc() throws IOException { - final byte[] data = Base64.getDecoder().decode("AQNtSWQBBTUuMS4yAAAAAQEEdHlwZQECaWQNeyJmb28iOiJiYXIifQAAAAAAAAAA"); - final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, - Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - MonitoringBulkDoc bulkDoc = MonitoringBulkDoc.readFrom(in); - assertEquals(MonitoredSystem.UNKNOWN, bulkDoc.getSystem()); - assertEquals("type", bulkDoc.getType()); - assertEquals("id", bulkDoc.getId()); - assertEquals(0L, bulkDoc.getTimestamp()); - assertEquals(0L, bulkDoc.getIntervalMillis()); - assertEquals("{\"foo\":\"bar\"}", bulkDoc.getSource().utf8ToString()); - assertEquals(XContentType.JSON, bulkDoc.getXContentType()); - } - } - /** * Test that we allow strings to be "" because Logstash 5.2 - 5.3 would submit empty _id values for time-based documents */ diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java index b336b3c885310..dc5cad7c94fd4 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.monitoring.action; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -26,7 +25,6 @@ import java.util.Collection; import java.util.List; -import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -254,52 +252,6 @@ public void testSerialization() throws IOException { assertArrayEquals(originalBulkDocs, deserializedBulkDocs); } - public void testSerializationBwc() throws IOException { - final MonitoringBulkRequest originalRequest = new MonitoringBulkRequest(); - - final int numDocs = iterations(10, 30); - for (int i = 0; i < numDocs; i++) { - originalRequest.add(randomMonitoringBulkDoc()); - } - - final Version version = randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_0_0_rc1); - - final BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(version); - originalRequest.writeTo(out); - - final StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); - - final MonitoringBulkRequest deserializedRequest = new MonitoringBulkRequest(); - deserializedRequest.readFrom(in); - - assertThat(in.available(), equalTo(0)); - - final MonitoringBulkDoc[] originalBulkDocs = originalRequest.getDocs().toArray(new MonitoringBulkDoc[]{}); - final MonitoringBulkDoc[] deserializedBulkDocs = deserializedRequest.getDocs().toArray(new MonitoringBulkDoc[]{}); - - assertThat(originalBulkDocs.length, equalTo(deserializedBulkDocs.length)); - - for (int i = 0; i < originalBulkDocs.length; i++) { - final MonitoringBulkDoc original = originalBulkDocs[i]; - final MonitoringBulkDoc deserialized = deserializedBulkDocs[i]; - - assertThat(deserialized.getSystem(), equalTo(original.getSystem())); - assertThat(deserialized.getType(), equalTo(original.getType())); - assertThat(deserialized.getId(), equalTo(original.getId())); - assertThat(deserialized.getTimestamp(), equalTo(original.getTimestamp())); - assertThat(deserialized.getSource(), equalTo(original.getSource())); - assertThat(deserialized.getXContentType(), equalTo(original.getXContentType())); - - if (version.onOrAfter(Version.V_6_0_0_rc1)) { - assertThat(deserialized.getIntervalMillis(), equalTo(original.getIntervalMillis())); - } else { - assertThat(deserialized.getIntervalMillis(), equalTo(0L)); - } - } - } - /** * Return a {@link XContentType} supported by the Monitoring Bulk API (JSON or Smile) */ diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/CollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/CollectorTests.java index 79279faa6f405..3d1a0bf9adedb 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/CollectorTests.java +++ 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/CollectorTests.java @@ -5,39 +5,11 @@ */ package org.elasticsearch.xpack.monitoring.collector; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; - -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; public class CollectorTests extends ESTestCase { public void testConvertNullNode() { assertEquals(null, Collector.convertNode(randomNonNegativeLong(), null)); } - - public void testConvertNode() { - final String name = randomBoolean() ? randomAlphaOfLength(5) : ""; - final String nodeId = randomAlphaOfLength(5); - final TransportAddress address = buildNewFakeTransportAddress(); - final Version version = randomFrom(Version.V_5_0_1, Version.V_5_3_0, Version.CURRENT); - final long timestamp = randomNonNegativeLong(); - - final Set roles = new HashSet<>(); - if (randomBoolean()) { - roles.addAll(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))); - } - - final MonitoringDoc.Node expectedNode = new MonitoringDoc.Node(nodeId, address.address().getHostString(), address.toString(), - address.getAddress(), name, timestamp); - - DiscoveryNode discoveryNode = new DiscoveryNode(name, nodeId, address, Collections.emptyMap(), roles, version); - assertEquals(expectedNode, Collector.convertNode(timestamp, discoveryNode)); - } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BaseMonitoringDocTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BaseMonitoringDocTestCase.java index 513ee3bdbb66b..46ba34dcd1a50 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BaseMonitoringDocTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BaseMonitoringDocTestCase.java @@ -5,12 +5,10 @@ */ package org.elasticsearch.xpack.monitoring.exporter; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -31,14 +29,12 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Base64; import java.util.List; import java.util.Map; import static java.util.Collections.emptyList; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; -import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -273,22 +269,4 @@ public void testMonitoringNodeSerialization() throws IOException { assertEquals(deserialized.hashCode(), original.hashCode()); assertNotSame(deserialized, original); } - - public void testMonitoringNodeBwcSerialization() throws IOException 
{ - final Version version = randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_0_0_beta2); - - final byte[] data = Base64.getDecoder() - .decode("AQVFSWJKdgEDdFFOAQV3cGtMagEFa2xqeWEBBVZTamF2AwVrZXkjMgEyBWtleSMxATEFa2V5IzABMAAAAAAAAA=="); - try (StreamInput in = StreamInput.wrap(data)) { - in.setVersion(version); - - final MonitoringDoc.Node node = new MonitoringDoc.Node(in); - assertEquals("EIbJv", node.getUUID()); - assertEquals("VSjav", node.getName()); - assertEquals("tQN", node.getHost()); - assertEquals("wpkLj", node.getTransportAddress()); - assertEquals("kljya", node.getIp()); - assertEquals(0L, node.getTimestamp()); - } - } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java index efc32fccb3dda..e44d6da073ef4 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java @@ -67,6 +67,7 @@ import static org.elasticsearch.threadpool.ThreadPool.Names.WRITE; import static org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils.TEMPLATE_VERSION; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -186,7 +187,6 @@ public void testMonitoringBulk() throws Exception { * This test waits for the monitoring service to collect monitoring documents and then checks that all expected documents * have been indexed with the expected information. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29880") public void testMonitoringService() throws Exception { final boolean createAPMIndex = randomBoolean(); final String indexName = createAPMIndex ? 
"apm-2017.11.06" : "books"; @@ -337,7 +337,7 @@ private void assertClusterStatsMonitoringDoc(final Map document, final Map clusterStats = (Map) source.get("cluster_stats"); assertThat(clusterStats, notNullValue()); - assertThat(clusterStats.size(), equalTo(4)); + assertThat(clusterStats.size(), equalTo(5)); final Map stackStats = (Map) source.get("stack_stats"); assertThat(stackStats, notNullValue()); @@ -347,7 +347,7 @@ private void assertClusterStatsMonitoringDoc(final Map document, assertThat(apm, notNullValue()); assertThat(apm.size(), equalTo(1)); assertThat(apm.remove("found"), is(apmIndicesExist)); - assertThat(apm.isEmpty(), is(true)); + assertThat(apm.keySet(), empty()); final Map xpackStats = (Map) stackStats.get("xpack"); assertThat(xpackStats, notNullValue()); @@ -359,14 +359,14 @@ private void assertClusterStatsMonitoringDoc(final Map document, final Map clusterState = (Map) source.get("cluster_state"); assertThat(clusterState, notNullValue()); - assertThat(clusterState.size(), equalTo(6)); assertThat(clusterState.remove("nodes_hash"), notNullValue()); assertThat(clusterState.remove("status"), notNullValue()); assertThat(clusterState.remove("version"), notNullValue()); assertThat(clusterState.remove("state_uuid"), notNullValue()); + assertThat(clusterState.remove("cluster_uuid"), notNullValue()); assertThat(clusterState.remove("master_node"), notNullValue()); assertThat(clusterState.remove("nodes"), notNullValue()); - assertThat(clusterState.isEmpty(), is(true)); + assertThat(clusterState.keySet(), empty()); } /** @@ -452,6 +452,11 @@ private void assertNodeStatsMonitoringDoc(final Map document) { return; } + // bulk is not a thread pool in the current version but we allow it to support mixed version clusters + if (filter.startsWith("node_stats.thread_pool.bulk")) { + return; + } + assertThat(filter + " must not be null in the monitoring document", extractValue(filter, source), notNullValue()); }); } diff --git a/x-pack/plugin/rollup/build.gradle b/x-pack/plugin/rollup/build.gradle index 649a89bc2cdee..75fd22abacc58 100644 --- a/x-pack/plugin/rollup/build.gradle +++ b/x-pack/plugin/rollup/build.gradle @@ -16,7 +16,8 @@ compileTestJava.options.compilerArgs << "-Xlint:-rawtypes" dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java index 4042e98ef93fb..a38adf5d9de3a 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java @@ -238,11 +238,23 @@ private static SearchResponse doCombineResponse(SearchResponse liveResponse, Lis ? 
(InternalAggregations)liveResponse.getAggregations() : InternalAggregations.EMPTY; - rolledResponses.forEach(r -> { - if (r == null || r.getAggregations() == null || r.getAggregations().asList().size() == 0) { - throw new RuntimeException("Expected to find aggregations in rollup response, but none found."); + int missingRollupAggs = rolledResponses.stream().mapToInt(searchResponse -> { + if (searchResponse == null + || searchResponse.getAggregations() == null + || searchResponse.getAggregations().asList().size() == 0) { + return 1; } - }); + return 0; + }).sum(); + + // We had no rollup aggs, so there is nothing to process + if (missingRollupAggs == rolledResponses.size()) { + // Return an empty response, but make sure we include all the shard, failure, etc stats + return mergeFinalResponse(liveResponse, rolledResponses, InternalAggregations.EMPTY); + } else if (missingRollupAggs > 0 && missingRollupAggs != rolledResponses.size()) { + // We were missing some but not all the aggs, unclear how to handle this. Bail. + throw new RuntimeException("Expected to find aggregations in rollup response, but none found."); + } // The combination process returns a tree that is identical to the non-rolled // which means we can use aggregation's reduce method to combine, just as if @@ -275,27 +287,39 @@ private static SearchResponse doCombineResponse(SearchResponse liveResponse, Lis new InternalAggregation.ReduceContext(reduceContext.bigArrays(), reduceContext.scriptService(), true)); } - // TODO allow profiling in the future - InternalSearchResponse combinedInternal = new InternalSearchResponse(SearchHits.empty(), currentTree, null, null, - rolledResponses.stream().anyMatch(SearchResponse::isTimedOut), - rolledResponses.stream().anyMatch(SearchResponse::isTimedOut), - rolledResponses.stream().mapToInt(SearchResponse::getNumReducePhases).sum()); + return mergeFinalResponse(liveResponse, rolledResponses, currentTree); + } + + private static SearchResponse mergeFinalResponse(SearchResponse liveResponse, List rolledResponses, + InternalAggregations aggs) { int totalShards = rolledResponses.stream().mapToInt(SearchResponse::getTotalShards).sum(); int sucessfulShards = rolledResponses.stream().mapToInt(SearchResponse::getSuccessfulShards).sum(); int skippedShards = rolledResponses.stream().mapToInt(SearchResponse::getSkippedShards).sum(); long took = rolledResponses.stream().mapToLong(r -> r.getTook().getMillis()).sum() ; + boolean isTimedOut = rolledResponses.stream().anyMatch(SearchResponse::isTimedOut); + boolean isTerminatedEarly = rolledResponses.stream() + .filter(r -> r.isTerminatedEarly() != null) + .anyMatch(SearchResponse::isTerminatedEarly); + int numReducePhases = rolledResponses.stream().mapToInt(SearchResponse::getNumReducePhases).sum(); + if (liveResponse != null) { totalShards += liveResponse.getTotalShards(); sucessfulShards += liveResponse.getSuccessfulShards(); skippedShards += liveResponse.getSkippedShards(); took = Math.max(took, liveResponse.getTook().getMillis()); + isTimedOut = isTimedOut && liveResponse.isTimedOut(); + isTerminatedEarly = isTerminatedEarly && liveResponse.isTerminatedEarly(); + numReducePhases += liveResponse.getNumReducePhases(); } + InternalSearchResponse combinedInternal = new InternalSearchResponse(SearchHits.empty(), aggs, null, null, + isTimedOut, isTerminatedEarly, numReducePhases); + // Shard failures are ignored atm, so returning an empty array is fine return new SearchResponse(combinedInternal, null, totalShards, sucessfulShards, skippedShards, - took, 
ShardSearchFailure.EMPTY_ARRAY, rolledResponses.get(0).getClusters()); + took, ShardSearchFailure.EMPTY_ARRAY, rolledResponses.get(0).getClusters()); } /** diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java index c63ab96fa2595..ea0319c34328b 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java @@ -155,6 +155,18 @@ static MultiSearchRequest createMSearchRequest(SearchRequest request, NamedWrite rolledSearchSource.size(0); AggregatorFactories.Builder sourceAgg = request.source().aggregations(); + // If there are no aggs in the request, our translation won't create any msearch. + // So just add a dummy request to the msearch and return. This is a bit silly + // but maintains how the regular search API behaves. + if (sourceAgg == null || sourceAgg.count() == 0) { + + // Note: we can't apply any query rewriting or filtering on the query because there + // are no validated caps, so we have no idea what job is intended here. The only thing + // this affects is doc count; since hits and aggs will both be empty it doesn't really matter. + msearch.add(new SearchRequest(context.getRollupIndices(), request.source()).types(request.types())); + return msearch; + } + // Find our list of "best" job caps Set validatedCaps = new HashSet<>(); sourceAgg.getAggregatorFactories() @@ -248,11 +260,6 @@ static void validateSearchRequest(SearchRequest request) { if (request.source().explain() != null && request.source().explain()) { throw new IllegalArgumentException("Rollup search does not support explaining."); } - - // Rollup is only useful if aggregations are set, throw an exception otherwise - if (request.source().aggregations() == null) { - throw new IllegalArgumentException("Rollup requires at least one aggregation to be set."); - } } static QueryBuilder rewriteQuery(QueryBuilder builder, Set jobCaps) { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java index 87294706b3b7d..d1db021361c8c 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java @@ -23,6 +23,7 @@ import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; +import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; @@ -392,15 +393,12 @@ private SearchRequest buildSearchRequest() { private CompositeAggregationBuilder createCompositeBuilder(RollupJobConfig config) { final GroupConfig groupConfig = config.getGroupConfig(); List> builders = new ArrayList<>(); - Map metadata = new HashMap<>(); // Add all the agg builders to our request in order: date_histo -> histo -> terms if (groupConfig != null) { builders.addAll(groupConfig.getDateHistogram().toBuilders()); - 
metadata.putAll(groupConfig.getDateHistogram().getMetadata()); if (groupConfig.getHistogram() != null) { builders.addAll(groupConfig.getHistogram().toBuilders()); - metadata.putAll(groupConfig.getHistogram().getMetadata()); } if (groupConfig.getTerms() != null) { builders.addAll(groupConfig.getTerms().toBuilders()); @@ -409,6 +407,8 @@ private CompositeAggregationBuilder createCompositeBuilder(RollupJobConfig confi CompositeAggregationBuilder composite = new CompositeAggregationBuilder(AGGREGATION_NAME, builders); config.getMetricsConfig().forEach(m -> m.toBuilders().forEach(composite::subAggregation)); + + final Map metadata = createMetadata(groupConfig); if (metadata.isEmpty() == false) { composite.setMetaData(metadata); } @@ -441,5 +441,20 @@ private QueryBuilder createBoundaryQuery(Map position) { .format("epoch_millis"); return query; } + + static Map createMetadata(final GroupConfig groupConfig) { + final Map metadata = new HashMap<>(); + if (groupConfig != null) { + // Add all the metadata in order: date_histo -> histo + final DateHistogramGroupConfig dateHistogram = groupConfig.getDateHistogram(); + metadata.put(RollupField.formatMetaField(RollupField.INTERVAL), dateHistogram.getInterval().toString()); + + final HistogramGroupConfig histogram = groupConfig.getHistogram(); + if (histogram != null) { + metadata.put(RollupField.formatMetaField(RollupField.INTERVAL), histogram.getInterval()); + } + } + return metadata; + } } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java index 35d9f0d133a3d..73a4d0665c4e1 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java @@ -198,10 +198,11 @@ public void testRolledMissingAggs() { BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); ScriptService scriptService = mock(ScriptService.class); - Exception e = expectThrows(RuntimeException.class, - () -> RollupResponseTranslator.combineResponses(msearch, - new InternalAggregation.ReduceContext(bigArrays, scriptService, true))); - assertThat(e.getMessage(), equalTo("Expected to find aggregations in rollup response, but none found.")); + SearchResponse response = RollupResponseTranslator.combineResponses(msearch, + new InternalAggregation.ReduceContext(bigArrays, scriptService, true)); + assertNotNull(response); + Aggregations responseAggs = response.getAggregations(); + assertThat(responseAggs.asList().size(), equalTo(0)); } public void testMissingRolledIndex() { diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java index 069e23e4093de..3cc6190db30d5 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java @@ -307,21 +307,22 @@ public void testExplain() { assertThat(e.getMessage(), equalTo("Rollup search does not support explaining.")); } - public void testNoAgg() { - String[] normalIndices = new String[]{randomAlphaOfLength(10)}; + public void testNoRollupAgg() { + String[] normalIndices = new String[]{}; 
String[] rollupIndices = new String[]{randomAlphaOfLength(10)}; TransportRollupSearchAction.RollupSearchContext ctx = new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, Collections.emptySet()); SearchSourceBuilder source = new SearchSourceBuilder(); source.query(new MatchAllQueryBuilder()); source.size(0); - SearchRequest request = new SearchRequest(normalIndices, source); + SearchRequest request = new SearchRequest(rollupIndices, source); NamedWriteableRegistry registry = mock(NamedWriteableRegistry.class); - Exception e = expectThrows(IllegalArgumentException.class, - () -> TransportRollupSearchAction.createMSearchRequest(request, registry, ctx)); - assertThat(e.getMessage(), equalTo("Rollup requires at least one aggregation to be set.")); + MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, registry, ctx); + assertThat(msearch.requests().size(), equalTo(1)); + assertThat(msearch.requests().get(0), equalTo(request)); } + public void testNoLiveNoRollup() { String[] normalIndices = new String[0]; String[] rollupIndices = new String[0]; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerTests.java new file mode 100644 index 0000000000000..5ab85e2ffa743 --- /dev/null +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.rollup.job; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; +import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.GroupConfig; +import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig; + +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class RollupIndexerTests extends ESTestCase { + + public void testCreateMetadataNoGroupConfig() { + final Map metadata = RollupIndexer.createMetadata(null); + assertNotNull(metadata); + assertTrue(metadata.isEmpty()); + } + + public void testCreateMetadataWithDateHistogramGroupConfigOnly() { + final DateHistogramGroupConfig dateHistogram = ConfigTestHelpers.randomDateHistogramGroupConfig(random()); + final GroupConfig groupConfig = new GroupConfig(dateHistogram); + + final Map metadata = RollupIndexer.createMetadata(groupConfig); + assertEquals(1, metadata.size()); + assertTrue(metadata.containsKey("_rollup.interval")); + Object value = metadata.get("_rollup.interval"); + assertThat(value, equalTo(dateHistogram.getInterval().toString())); + } + + public void testCreateMetadata() { + final DateHistogramGroupConfig dateHistogram = ConfigTestHelpers.randomDateHistogramGroupConfig(random()); + final HistogramGroupConfig histogram = ConfigTestHelpers.randomHistogramGroupConfig(random()); + final GroupConfig groupConfig = new GroupConfig(dateHistogram, histogram, null); + + final Map metadata = RollupIndexer.createMetadata(groupConfig); + assertEquals(1, metadata.size()); + assertTrue(metadata.containsKey("_rollup.interval")); + Object value = metadata.get("_rollup.interval"); + assertThat(value, equalTo(histogram.getInterval())); + } +} + 
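A note on the RollupIndexer.createMetadata change above: RollupField.formatMetaField(RollupField.INTERVAL) resolves to the single "_rollup.interval" key that the new tests assert on, so when a job configures both a date_histogram and a histogram group, the histogram interval overwrites the date_histogram interval in the composite agg's metadata rather than being stored alongside it. That is exactly what testCreateMetadata pins down with its size-1 assertion. A minimal standalone sketch of the key collision (class name and interval values here are illustrative, not production constants):

import java.util.HashMap;
import java.util.Map;

public class RollupMetadataSketch {
    public static void main(String[] args) {
        final String intervalKey = "_rollup.interval"; // same key is used for both group configs
        Map<String, Object> metadata = new HashMap<>();
        metadata.put(intervalKey, "1h"); // date_histogram interval goes in first...
        metadata.put(intervalKey, 5L);   // ...then a histogram interval silently replaces it
        System.out.println(metadata.size()); // prints 1 -- only the histogram interval survives
    }
}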
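Similarly, the RollupResponseTranslator change above replaces the old "throw if any rollup response has no aggs" loop with a count-based guard: when every rolled response is agg-less the translator now returns an empty merged response (while still merging shard, timeout, and failure stats via mergeFinalResponse), and it only bails when some but not all responses are missing aggs. A hedged sketch of that three-way branch, using a stub type in place of SearchResponse:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class MissingAggsGuardSketch {
    // Stub standing in for SearchResponse; only the aggregation list matters here.
    static class Resp {
        final List<Object> aggs;
        Resp(List<Object> aggs) { this.aggs = aggs; }
    }

    static String combine(List<Resp> rolledResponses) {
        long missing = rolledResponses.stream()
                .filter(r -> r == null || r.aggs == null || r.aggs.isEmpty())
                .count();
        if (missing == rolledResponses.size()) {
            // all rollup responses were agg-less: short-circuit to an empty merged response
            return "empty aggs, shard/took/failure stats still merged";
        } else if (missing > 0) {
            // mixed agg-less and agg-carrying responses: unclear how to merge, so bail
            return "error, as with the RuntimeException above";
        }
        return "reduce the rollup aggregation trees as before";
    }

    public static void main(String[] args) {
        System.out.println(combine(Arrays.asList(new Resp(Collections.emptyList()))));
    }
}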
diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index 6db533bbecf9b..5198c3da66983 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -12,7 +12,8 @@ esplugin { archivesBaseName = 'x-pack-security' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') compileOnly project(path: ':modules:transport-netty4', configuration: 'runtime') compileOnly project(path: ':plugins:transport-nio', configuration: 'runtime') @@ -157,8 +158,7 @@ forbiddenPatterns { } forbiddenApisMain { - signaturesURLs += file('forbidden/ldap-signatures.txt').toURI().toURL() - signaturesURLs += file('forbidden/xml-signatures.txt').toURI().toURL() + signaturesFiles += files('forbidden/ldap-signatures.txt', 'forbidden/xml-signatures.txt') } // classes are missing, e.g. com.ibm.icu.lang.UCharacter diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle index 1a00b2a034000..426c48aac80ae 100644 --- a/x-pack/plugin/security/cli/build.gradle +++ b/x-pack/plugin/security/cli/build.gradle @@ -4,7 +4,8 @@ archivesBaseName = 'elasticsearch-security-cli' dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') compile 'org.bouncycastle:bcprov-jdk15on:1.59' compile 'org.bouncycastle:bcpkix-jdk15on:1.59' testImplementation 'com.google.jimfs:jimfs:1.1' @@ -21,4 +22,4 @@ dependencyLicenses { if (inFipsJvm) { test.enabled = false -} \ No newline at end of file +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java index 99c138bbb121d..0b8dbd0233550 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java @@ -226,12 +226,10 @@ private boolean userIsDefinedForCurrentSecurityMapping(String username) { private Version getDefinedVersion(String username) { switch (username) { - case LogstashSystemUser.NAME: - return LogstashSystemUser.DEFINED_SINCE; case BeatsSystemUser.NAME: return BeatsSystemUser.DEFINED_SINCE; default: - return Version.V_5_0_0; + return Version.V_6_0_0; } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java index c388fd5627c32..3cf2034cc74b8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java @@ -25,9 +25,9 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; +import 
org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; import org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField; import java.util.ArrayList; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java index 761af81b08ec5..b686994a2ee98 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java @@ -25,7 +25,6 @@ import org.elasticsearch.transport.nio.NioTcpChannel; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.authc.Authentication; -import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.security.action.SecurityActionMapper; @@ -116,50 +115,28 @@ requests from all the nodes are attached with a user (either a serialize } } - final Version version = transportChannel.getVersion().equals(Version.V_5_4_0) ? Version.CURRENT : transportChannel.getVersion(); + final Version version = transportChannel.getVersion(); authcService.authenticate(securityAction, request, (User)null, ActionListener.wrap((authentication) -> { - if (reservedRealmEnabled && authentication.getVersion().before(Version.V_5_2_0) && - KibanaUser.NAME.equals(authentication.getUser().authenticatedUser().principal())) { - executeAsCurrentVersionKibanaUser(securityAction, request, transportChannel, listener, authentication); - } else if (securityAction.equals(TransportService.HANDSHAKE_ACTION_NAME) && - SystemUser.is(authentication.getUser()) == false) { - securityContext.executeAsUser(SystemUser.INSTANCE, (ctx) -> { - final Authentication replaced = Authentication.getAuthentication(threadContext); - final AuthorizationUtils.AsyncAuthorizer asyncAuthorizer = - new AuthorizationUtils.AsyncAuthorizer(replaced, listener, (userRoles, runAsRoles) -> { - authzService.authorize(replaced, securityAction, request, userRoles, runAsRoles); - listener.onResponse(null); - }); - asyncAuthorizer.authorize(authzService); - }, version); - } else { + if (securityAction.equals(TransportService.HANDSHAKE_ACTION_NAME) && + SystemUser.is(authentication.getUser()) == false) { + securityContext.executeAsUser(SystemUser.INSTANCE, (ctx) -> { + final Authentication replaced = Authentication.getAuthentication(threadContext); final AuthorizationUtils.AsyncAuthorizer asyncAuthorizer = - new AuthorizationUtils.AsyncAuthorizer(authentication, listener, (userRoles, runAsRoles) -> { - authzService.authorize(authentication, securityAction, request, userRoles, runAsRoles); - listener.onResponse(null); - }); + new AuthorizationUtils.AsyncAuthorizer(replaced, listener, (userRoles, runAsRoles) -> { + authzService.authorize(replaced, securityAction, request, userRoles, runAsRoles); + listener.onResponse(null); + }); asyncAuthorizer.authorize(authzService); - } - }, listener::onFailure)); - } - - private void executeAsCurrentVersionKibanaUser(String securityAction, TransportRequest request, TransportChannel 
transportChannel, - ActionListener listener, Authentication authentication) { - // the authentication came from an older node - so let's replace the user with our version - final User kibanaUser = new KibanaUser(authentication.getUser().enabled()); - if (kibanaUser.enabled()) { - securityContext.executeAsUser(kibanaUser, (original) -> { - final Authentication replacedUserAuth = securityContext.getAuthentication(); + }, version); + } else { final AuthorizationUtils.AsyncAuthorizer asyncAuthorizer = - new AuthorizationUtils.AsyncAuthorizer(replacedUserAuth, listener, (userRoles, runAsRoles) -> { - authzService.authorize(replacedUserAuth, securityAction, request, userRoles, runAsRoles); + new AuthorizationUtils.AsyncAuthorizer(authentication, listener, (userRoles, runAsRoles) -> { + authzService.authorize(authentication, securityAction, request, userRoles, runAsRoles); listener.onResponse(null); }); asyncAuthorizer.authorize(authzService); - }, transportChannel.getVersion()); - } else { - throw new IllegalStateException("a disabled user should never be sent. " + kibanaUser); - } + } + }, listener::onFailure)); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java index 1ac5490dc0c6a..e4e1e7ca1c015 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java @@ -79,7 +79,7 @@ public void init() throws Exception { ClusterState state = mock(ClusterState.class); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("id1", buildNewFakeTransportAddress(), Version.CURRENT)) - .add(new DiscoveryNode("id2", buildNewFakeTransportAddress(), Version.V_5_4_0)) + .add(new DiscoveryNode("id2", buildNewFakeTransportAddress(), Version.V_6_0_0)) .build(); when(state.nodes()).thenReturn(nodes); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java index 04e0afcf88293..39d518a73f3b4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java @@ -444,23 +444,15 @@ public static void mockGetAllReservedUserInfo(NativeUsersStore usersStore, Map versionPredicate) { - assertThat(versionPredicate.test(Version.V_5_0_0_rc1), is(false)); switch (principal) { case LogstashSystemUser.NAME: - assertThat(versionPredicate.test(Version.V_5_0_0), is(false)); - assertThat(versionPredicate.test(Version.V_5_1_1), is(false)); - assertThat(versionPredicate.test(Version.V_5_2_0), is(true)); assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); break; case BeatsSystemUser.NAME: - assertThat(versionPredicate.test(Version.V_5_6_9), is(false)); assertThat(versionPredicate.test(Version.V_6_2_3), is(false)); assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); break; default: - assertThat(versionPredicate.test(Version.V_5_0_0), is(true)); - assertThat(versionPredicate.test(Version.V_5_1_1), is(true)); - assertThat(versionPredicate.test(Version.V_5_2_0), is(true)); 
assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); break; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java index 80e61a5545fea..9e6fafc481db6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.security.authc.kerberos; +import org.apache.lucene.util.Constants; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; @@ -13,28 +14,27 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData; import org.ietf.jgss.GSSException; import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SeekableByteChannel; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.attribute.AclEntry; -import java.nio.file.attribute.AclEntryPermission; -import java.nio.file.attribute.AclEntryType; -import java.nio.file.attribute.AclFileAttributeView; -import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.FileAttribute; +import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.PosixFilePermissions; -import java.nio.file.attribute.UserPrincipal; import java.util.Arrays; -import java.util.List; +import java.util.EnumSet; import java.util.Locale; import java.util.Set; @@ -110,52 +110,47 @@ public void testLookupUser() { assertThat(future.actionGet(), is(nullValue())); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32768") - public void testKerberosRealmWithInvalidKeytabPathConfigurations() throws IOException { - final String keytabPathCase = randomFrom("keytabPathAsDirectory", "keytabFileDoesNotExist", "keytabPathWithNoReadPermissions"); - final String expectedErrorMessage; - final String keytabPath; - switch (keytabPathCase) { - case "keytabPathAsDirectory": - final String dirName = randomAlphaOfLength(5); - Files.createDirectory(dir.resolve(dirName)); - keytabPath = dir.resolve(dirName).toString(); - expectedErrorMessage = "configured service key tab file [" + keytabPath + "] is a directory"; - break; - case "keytabFileDoesNotExist": - keytabPath = dir.resolve(randomAlphaOfLength(5) + ".keytab").toString(); - expectedErrorMessage = "configured service key tab file [" + keytabPath + "] does not exist"; - break; - case "keytabPathWithNoReadPermissions": - final String fileName = randomAlphaOfLength(5); - final Path keytabFilePath = 
Files.createTempFile(dir, fileName, ".keytab"); - Files.write(keytabFilePath, randomAlphaOfLength(5).getBytes(StandardCharsets.UTF_8)); - final Set supportedAttributes = keytabFilePath.getFileSystem().supportedFileAttributeViews(); - if (supportedAttributes.contains("posix")) { - final PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(keytabFilePath, PosixFileAttributeView.class); - fileAttributeView.setPermissions(PosixFilePermissions.fromString("---------")); - } else if (supportedAttributes.contains("acl")) { - final UserPrincipal principal = Files.getOwner(keytabFilePath); - final AclFileAttributeView view = Files.getFileAttributeView(keytabFilePath, AclFileAttributeView.class); - final AclEntry entry = AclEntry.newBuilder() - .setType(AclEntryType.DENY) - .setPrincipal(principal) - .setPermissions(AclEntryPermission.READ_DATA, AclEntryPermission.READ_ATTRIBUTES).build(); - final List acl = view.getAcl(); - acl.add(0, entry); - view.setAcl(acl); - } else { - throw new UnsupportedOperationException( - String.format(Locale.ROOT, "Don't know how to make file [%s] non-readable on a file system with attributes [%s]", - keytabFilePath, supportedAttributes)); + public void testKerberosRealmThrowsErrorWhenKeytabPathIsConfiguredAsDirectory() throws IOException { + final String dirName = randomAlphaOfLength(5); + Files.createDirectory(dir.resolve(dirName)); + final String keytabPath = dir.resolve(dirName).toString(); + final String expectedErrorMessage = "configured service key tab file [" + keytabPath + "] is a directory"; + + assertKerberosRealmConstructorFails(keytabPath, expectedErrorMessage); + } + + public void testKerberosRealmThrowsErrorWhenKeytabFileDoesNotExist() throws IOException { + final String keytabPath = dir.resolve(randomAlphaOfLength(5) + ".keytab").toString(); + final String expectedErrorMessage = "configured service key tab file [" + keytabPath + "] does not exist"; + + assertKerberosRealmConstructorFails(keytabPath, expectedErrorMessage); + } + + public void testKerberosRealmThrowsErrorWhenKeytabFileHasNoReadPermissions() throws IOException { + assumeFalse("Not running this test on Windows, as it requires additional access permissions for test framework.", + Constants.WINDOWS); + final Set supportedAttributes = dir.getFileSystem().supportedFileAttributeViews(); + final String keytabFileName = randomAlphaOfLength(5) + ".keytab"; + final Path keytabPath; + if (supportedAttributes.contains("posix")) { + final Set filePerms = PosixFilePermissions.fromString("---------"); + final FileAttribute> fileAttributes = PosixFilePermissions.asFileAttribute(filePerms); + try (SeekableByteChannel byteChannel = Files.newByteChannel(dir.resolve(keytabFileName), + EnumSet.of(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), fileAttributes)) { + byteChannel.write(ByteBuffer.wrap(randomByteArrayOfLength(10))); } - keytabPath = keytabFilePath.toString(); - expectedErrorMessage = "configured service key tab file [" + keytabPath + "] must have read permission"; - break; - default: - throw new IllegalArgumentException("Unknown test case :" + keytabPathCase); + keytabPath = dir.resolve(keytabFileName); + } else { + throw new UnsupportedOperationException( + String.format(Locale.ROOT, "Don't know how to make file [%s] non-readable on a file system with attributes [%s]", + dir.resolve(keytabFileName), supportedAttributes)); } + final String expectedErrorMessage = "configured service key tab file [" + keytabPath + "] must have read permission"; + + 
assertKerberosRealmConstructorFails(keytabPath.toString(), expectedErrorMessage); + } + private void assertKerberosRealmConstructorFails(final String keytabPath, final String expectedErrorMessage) { settings = KerberosTestCase.buildKerberosRealmSettings(keytabPath, 100, "10m", true, randomBoolean()); config = new RealmConfig("test-kerb-realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java index cf9c09759ea09..c0867875b0181 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -46,12 +46,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; -import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; import org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java index 76568d3d48b5a..a88dafece3251 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java @@ -102,6 +102,7 @@ public void testEmptyAuthorizedIndicesSearchForAll() { assertNoSearchHits(client().prepareSearch().get()); } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/33123") public void testEmptyAuthorizedIndicesSearchForAllDisallowNoIndices() { createIndicesWithRandomAliases("index1", "index2"); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> client().prepareSearch() diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java index 07686838ad0e2..08e4b1123c70a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.authz; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -18,10 +19,11 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import 
org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; -import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; import org.elasticsearch.xpack.core.security.support.MetadataUtils; import org.hamcrest.Matchers; @@ -208,7 +210,10 @@ public void testParse() throws Exception { } public void testSerialization() throws Exception { + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, null); + logger.info("Testing serialization with version {}", version); BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(version); RoleDescriptor.IndicesPrivileges[] groups = new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() .indices("i1", "i2") @@ -235,6 +240,7 @@ public void testSerialization() throws Exception { final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables()); StreamInput streamInput = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())), registry); + streamInput.setVersion(version); final RoleDescriptor serialized = RoleDescriptor.readFrom(streamInput); assertEquals(descriptor, serialized); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java index 825ce4ee44c60..34a0685c2fd21 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java @@ -195,7 +195,7 @@ public void testIndicesPrivilegesStreaming() throws IOException { assertEquals(readIndicesPrivileges, indicesPrivileges.build()); out = new BytesStreamOutput(); - out.setVersion(Version.V_5_0_0); + out.setVersion(Version.V_6_0_0); indicesPrivileges = RoleDescriptor.IndicesPrivileges.builder(); indicesPrivileges.grantedFields(allowed); indicesPrivileges.deniedFields(denied); @@ -205,7 +205,7 @@ public void testIndicesPrivilegesStreaming() throws IOException { indicesPrivileges.build().writeTo(out); out.close(); in = out.bytes().streamInput(); - in.setVersion(Version.V_5_0_0); + in.setVersion(Version.V_6_0_0); RoleDescriptor.IndicesPrivileges readIndicesPrivileges2 = RoleDescriptor.IndicesPrivileges.createFrom(in); assertEquals(readIndicesPrivileges, readIndicesPrivileges2); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index 7d10198c6aea8..c3a6d7e920d1a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -347,10 +347,10 @@ public void testIndexTemplateVersionMatching() throws Exception { 
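The testSerialization change above pins one randomly chosen wire version on both the output and the input stream, the usual pattern for exercising version-dependent read/write paths. A stripped-down sketch of that round trip (the string payload stands in for the RoleDescriptor the real test writes):

```java
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

public class VersionedRoundTripSketch {
    public static void main(String[] args) throws Exception {
        Version version = Version.V_6_4_0; // the test randomizes this between 6.4.0 and current

        // Serialize with the wire format of the chosen version...
        BytesStreamOutput out = new BytesStreamOutput();
        out.setVersion(version);
        out.writeString("payload");

        // ...then deserialize with the same version pinned on the input,
        // so both sides agree on which optional fields are on the wire.
        StreamInput in = out.bytes().streamInput();
        in.setVersion(version);
        System.out.println(in.readString()); // payload
    }
}
```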
assertTrue(SecurityIndexManager.checkTemplateExistsAndVersionMatches( SecurityIndexManager.SECURITY_TEMPLATE_NAME, clusterState, logger, - Version.V_5_0_0::before)); + Version.V_6_0_0::before)); assertFalse(SecurityIndexManager.checkTemplateExistsAndVersionMatches( SecurityIndexManager.SECURITY_TEMPLATE_NAME, clusterState, logger, - Version.V_5_0_0::after)); + Version.V_6_0_0::after)); } public void testUpToDateMappingsAreIdentifiedAsUpToDate() throws IOException { @@ -448,4 +448,4 @@ private static IndexTemplateMetaData.Builder getIndexTemplateMetaData(String tem } return templateBuilder; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java index 08a991eb3ec29..bf8d8042546fd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; -import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.protocol.xpack.security.User; import org.elasticsearch.xpack.core.security.user.XPackUser; @@ -37,12 +36,10 @@ import java.io.IOException; import java.util.Collections; -import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.mock.orig.Mockito.times; import static org.elasticsearch.xpack.core.security.support.Exceptions.authenticationError; import static org.elasticsearch.xpack.core.security.support.Exceptions.authorizationError; -import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; @@ -220,47 +217,6 @@ public void testNodeProfileAllowsNodeActions() throws Exception { verifyNoMoreInteractions(authcService, authzService); } - public void testHandlesKibanaUserCompatibility() throws Exception { - TransportRequest request = mock(TransportRequest.class); - User user = new User("kibana", "kibana"); - Authentication authentication = mock(Authentication.class); - final Version version = Version.fromId(randomIntBetween(Version.V_5_0_0_ID, Version.V_5_2_0_ID - 100)); - when(authentication.getVersion()).thenReturn(version); - when(authentication.getUser()).thenReturn(user); - doAnswer((i) -> { - ActionListener<Authentication> callback = - (ActionListener<Authentication>) i.getArguments()[3]; - callback.onResponse(authentication); - return Void.TYPE; - }).when(authcService).authenticate(eq("_action"), eq(request), eq((User)null), any(ActionListener.class)); - AtomicReference<String[]> rolesRef = new AtomicReference<>(); - final Role empty = Role.EMPTY; - doAnswer((i) -> { - ActionListener<Role> callback = - (ActionListener<Role>) i.getArguments()[1]; - rolesRef.set(((User) i.getArguments()[0]).roles()); - callback.onResponse(empty); - return Void.TYPE; - }).when(authzService).roles(any(User.class), any(ActionListener.class)); - ServerTransportFilter filter = getClientOrNodeFilter(); - PlainActionFuture<Void> future = new PlainActionFuture<>(); - when(channel.getVersion()).thenReturn(version); - 
filter.inbound("_action", request, channel, future); - assertNotNull(rolesRef.get()); - assertThat(rolesRef.get(), arrayContaining("kibana_system")); - - // test with a version that doesn't need changing - filter = getClientOrNodeFilter(); - rolesRef.set(null); - user = new KibanaUser(true); - when(authentication.getUser()).thenReturn(user); - when(authentication.getVersion()).thenReturn(Version.V_5_2_0); - future = new PlainActionFuture<>(); - filter.inbound("_action", request, channel, future); - assertNotNull(rolesRef.get()); - assertThat(rolesRef.get(), arrayContaining("kibana_system")); - } - private ServerTransportFilter getClientOrNodeFilter() throws IOException { return randomBoolean() ? getNodeFilter(true) : getClientFilter(true); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java index 6bea620982fac..0d5941eaf2674 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java @@ -5,9 +5,7 @@ */ package org.elasticsearch.xpack.security.user; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.user.ElasticUser; import org.elasticsearch.xpack.core.security.user.InternalUserSerializationHelper; @@ -60,46 +58,6 @@ public void testWriteToAndReadFromWithRunAs() throws Exception { assertThat(readFromAuthUser.authenticatedUser(), is(authUser)); } - public void testRunAsBackcompatRead() throws Exception { - User user = new User(randomAlphaOfLengthBetween(4, 30), - randomBoolean() ? generateRandomStringArray(20, 30, false) : null); - // store the runAs user as the "authenticationUser" here to mimic old format for writing - User authUser = new User(randomAlphaOfLengthBetween(4, 30), generateRandomStringArray(20, 30, false), user); - - BytesStreamOutput output = new BytesStreamOutput(); - User.writeTo(authUser, output); - StreamInput input = output.bytes().streamInput(); - input.setVersion(randomFrom(Version.V_5_0_0, Version.V_5_4_0)); - User readFrom = User.readFrom(input); - - assertThat(readFrom.principal(), is(user.principal())); - assertThat(Arrays.equals(readFrom.roles(), user.roles()), is(true)); - User readFromAuthUser = readFrom.authenticatedUser(); - assertThat(authUser, is(notNullValue())); - assertThat(readFromAuthUser.principal(), is(authUser.principal())); - assertThat(Arrays.equals(readFromAuthUser.roles(), authUser.roles()), is(true)); - } - - public void testRunAsBackcompatWrite() throws Exception { - User user = new User(randomAlphaOfLengthBetween(4, 30), - randomBoolean() ? 
generateRandomStringArray(20, 30, false) : null); - // store the runAs user as the "authenticationUser" here to mimic old format for writing - User authUser = new User(randomAlphaOfLengthBetween(4, 30), generateRandomStringArray(20, 30, false), user); - - BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(randomFrom(Version.V_5_0_0, Version.V_5_4_0)); - User.writeTo(authUser, output); - StreamInput input = output.bytes().streamInput(); - User readFrom = User.readFrom(input); - - assertThat(readFrom.principal(), is(user.principal())); - assertThat(Arrays.equals(readFrom.roles(), user.roles()), is(true)); - User readFromAuthUser = readFrom.authenticatedUser(); - assertThat(authUser, is(notNullValue())); - assertThat(readFromAuthUser.principal(), is(authUser.principal())); - assertThat(Arrays.equals(readFromAuthUser.roles(), authUser.roles()), is(true)); - } - public void testSystemUserReadAndWrite() throws Exception { BytesStreamOutput output = new BytesStreamOutput(); diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index 039e78c14952c..62097e76b97ea 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -19,7 +19,8 @@ archivesBaseName = 'x-pack-sql' integTest.enabled = false dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') compileOnly(project(':modules:lang-painless')) { // exclude ASM to not affect featureAware task on Java 10+ exclude group: "org.ow2.asm" diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index a0d9b24c50729..1a7d6115e1556 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -8,7 +8,7 @@ archivesBaseName = "x-pack-sql-jdbc" forbiddenApisMain { // does not depend on core, so only jdk and http signatures should be checked - signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } dependencies { diff --git a/x-pack/plugin/sql/sql-action/build.gradle b/x-pack/plugin/sql/sql-action/build.gradle index bf79fd824ef8d..345318d20b803 100644 --- a/x-pack/plugin/sql/sql-action/build.gradle +++ b/x-pack/plugin/sql/sql-action/build.gradle @@ -2,9 +2,6 @@ /* * This project contains transport-level requests and responses that are shared between x-pack plugin and qa tests */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' description = 'Request and response objects shared by the cli, jdbc ' + @@ -34,7 +31,7 @@ dependencies { forbiddenApisMain { //sql does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } dependencyLicenses { diff --git a/x-pack/plugin/sql/sql-cli/build.gradle b/x-pack/plugin/sql/sql-cli/build.gradle index b90b07abad3d1..0b2559c6a84aa 100644 --- a/x-pack/plugin/sql/sql-cli/build.gradle +++ b/x-pack/plugin/sql/sql-cli/build.gradle @@ -1,3 +1,4 @@ +import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask /* * This project is named sql-cli because it is in the "org.elasticsearch.plugin" @@ -74,11 +75,8 @@ artifacts { } -forbiddenApisMain { - signaturesURLs += file('src/forbidden/cli-signatures.txt').toURI().toURL() -} -forbiddenApisTest { - 
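Stepping back to the two serialization tests deleted just above (before these build-file hunks): they existed purely to exercise a version-gated wire format, where the run-as user was written in an older layout guarded by stream-version checks while 5.x peers were still supported. With 5.x wire compatibility gone, the gates and their tests go away together. The general shape of such a gate, in miniature (the class, field, and gate version here are all illustrative):

```java
import java.io.IOException;
import java.util.Map;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// A version-gated wire field: older peers never sent or expected the map,
// so it is read and written only when the stream version is new enough.
class GatedFeature {
    final String name;
    final Map<String, Object> nativeCodeInfo;

    GatedFeature(StreamInput in) throws IOException {
        name = in.readString();
        nativeCodeInfo = in.getVersion().onOrAfter(Version.V_6_4_0) ? in.readMap() : null;
    }

    GatedFeature(String name, Map<String, Object> nativeCodeInfo) {
        this.name = name;
        this.nativeCodeInfo = nativeCodeInfo;
    }

    void writeTo(StreamOutput out) throws IOException {
        out.writeString(name);
        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
            out.writeMap(nativeCodeInfo);
        }
    }
}
```

Once the minimum compatible version passes the gate, both branches are dead weight, which is exactly the cleanup this PR performs across several of these files.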
signaturesURLs += file('src/forbidden/cli-signatures.txt').toURI().toURL() +tasks.withType(ForbiddenApisCliTask) { + signaturesFiles += files('src/forbidden/cli-signatures.txt') } thirdPartyAudit.excludes = [ diff --git a/x-pack/plugin/sql/sql-client/build.gradle b/x-pack/plugin/sql/sql-client/build.gradle index fbc411e44596d..c4ee030d4568f 100644 --- a/x-pack/plugin/sql/sql-client/build.gradle +++ b/x-pack/plugin/sql/sql-client/build.gradle @@ -26,7 +26,7 @@ dependencyLicenses { forbiddenApisMain { // does not depend on core, so only jdk and http signatures should be checked - signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } forbiddenApisTest { diff --git a/x-pack/plugin/sql/sql-proto/build.gradle b/x-pack/plugin/sql/sql-proto/build.gradle index 7f26176e3c7a7..7d28336bfc51f 100644 --- a/x-pack/plugin/sql/sql-proto/build.gradle +++ b/x-pack/plugin/sql/sql-proto/build.gradle @@ -2,9 +2,6 @@ /* * This project contains XContent protocol classes shared between server and http client */ - -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' description = 'Request and response objects shared by the cli, jdbc ' + @@ -25,7 +22,7 @@ dependencies { forbiddenApisMain { //sql does not depend on server, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + replaceSignatureFiles 'jdk-signatures' } dependencyLicenses { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionProcessorTests.java index a4d9d4cb57ab6..d3336ec89a840 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionProcessorTests.java @@ -75,17 +75,27 @@ public void testLCase() { stringCharInputValidation(proc); } - public void testLCaseWithTRLocale() { + public void testLCaseWithAZTRLocale() { + Locale initialLocale = Locale.getDefault(); Locale.setDefault(Locale.forLanguageTag("tr")); - StringProcessor proc = new StringProcessor(StringOperation.LCASE); - // ES-SQL is not locale sensitive (so far). The obvious test for this is the Turkish language, uppercase letter I conversion - // in non-Turkish locale the lowercasing would create i and an additional dot, while in Turkish Locale it would only create "i" - // unicode 0069 = i - assertEquals("\u0069\u0307", proc.process("\u0130")); - // unicode 0049 = I (regular capital letter i) - // in Turkish locale this would be lowercased to a "i" without dot (unicode 0131) - assertEquals("\u0069", proc.process("\u0049")); + try { + StringProcessor proc = new StringProcessor(StringOperation.LCASE); + // ES-SQL is not locale sensitive (so far). 
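That locale caveat is the classic JVM trap: String.toLowerCase() and toUpperCase() without an explicit Locale follow Locale.getDefault(), and the Turkish and Azeri locales map the letter I differently from every other locale. A standalone illustration of the mappings these assertions encode:

```java
import java.util.Locale;

public class TurkishCaseDemo {
    public static void main(String[] args) {
        Locale tr = Locale.forLanguageTag("tr");

        // Root locale: plain ASCII mapping, I (U+0049) lowercases to i (U+0069).
        System.out.println("\u0049".toLowerCase(Locale.ROOT)); // "i"
        // Turkish locale: I lowercases to dotless i (U+0131).
        System.out.println("\u0049".toLowerCase(tr));          // "\u0131"

        // Dotted capital I (U+0130) in the root locale becomes i plus a
        // combining dot above (U+0069 U+0307)...
        System.out.println("\u0130".toLowerCase(Locale.ROOT)); // "i\u0307"
        // ...but in Turkish it becomes a plain i.
        System.out.println("\u0130".toLowerCase(tr));          // "i"
    }
}
```

This is also why the updated tests wrap their assertions in try/finally and restore the original default locale afterwards: a leaked default locale would silently change the behavior of every later test in the JVM.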
The obvious test for this is the Turkish language, uppercase letter I conversion + // in non-Turkish locale the lowercasing would create i and an additional dot, while in Turkish Locale it would only create "i" + // unicode 0069 = i + assertEquals("\u0069\u0307", proc.process("\u0130")); + // unicode 0049 = I (regular capital letter i) + // in Turkish locale this would be lowercased to a "i" without dot (unicode 0131) + assertEquals("\u0069", proc.process("\u0049")); + + Locale.setDefault(Locale.forLanguageTag("az")); + assertEquals("\u0069\u0307", proc.process("\u0130")); + assertEquals("\u0069", proc.process("\u0049")); + } finally { + // restore the original Locale + Locale.setDefault(initialLocale); + } } public void testUCase() { @@ -102,13 +112,22 @@ public void testUCase() { stringCharInputValidation(proc); } - public void testUCaseWithTRLocale() { + public void testUCaseWithAZTRLocale() { + Locale initialLocale = Locale.getDefault(); Locale.setDefault(Locale.forLanguageTag("tr")); - StringProcessor proc = new StringProcessor(StringOperation.UCASE); - - // ES-SQL is not Locale sensitive (so far). - // in Turkish locale, small letter "i" is uppercased to "I" with a dot above (unicode 130), otherwise in "i" (unicode 49) - assertEquals("\u0049", proc.process("\u0069")); + + try { + StringProcessor proc = new StringProcessor(StringOperation.UCASE); + // ES-SQL is not Locale sensitive (so far). + // in Turkish locale, small letter "i" is uppercased to "I" with a dot above (unicode 130), otherwise in "i" (unicode 49) + assertEquals("\u0049", proc.process("\u0069")); + + Locale.setDefault(Locale.forLanguageTag("az")); + assertEquals("\u0049", proc.process("\u0069")); + } finally { + // restore the original Locale + Locale.setDefault(initialLocale); + } } public void testLength() { @@ -179,7 +198,7 @@ public void testCharLength() { assertEquals(7, proc.process("foo bar")); assertEquals(0, proc.process("")); assertEquals(1, proc.process('f')); - assertEquals(1, proc.process('€')); + assertEquals(1, proc.process('\u20ac')); // euro symbol stringCharInputValidation(proc); } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json index 26c72666e8fa4..4c1df6b99db79 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json @@ -1,6 +1,6 @@ { "xpack.security.delete_role_mapping": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-role-mapping.html#security-api-delete-role-mapping", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role-mapping.html", "methods": [ "DELETE" ], "url": { "path": "/_xpack/security/role_mapping/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json index 0bdeb54cfb678..7696f6671e489 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json @@ -1,6 +1,6 @@ { "xpack.security.get_role_mapping": { - "documentation": 
"https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-role-mapping.html#security-api-get-role-mapping", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html", "methods": [ "GET" ], "url": { "path": "/_xpack/security/role_mapping/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.has_privileges.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.has_privileges.json index 64b15ae9c0222..9c75b40e4d1a7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.has_privileges.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.has_privileges.json @@ -1,6 +1,6 @@ { "xpack.security.has_privileges": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-privileges.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges.html", "methods": [ "GET", "POST" ], "url": { "path": "/_xpack/security/user/_has_privileges", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json index 3f92cd130bab4..98e723d80e9b0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json @@ -1,6 +1,6 @@ { "xpack.security.put_role_mapping": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-role-mapping.html#security-api-put-role-mapping", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html", "methods": [ "PUT", "POST" ], "url": { "path": "/_xpack/security/role_mapping/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml index d401d5c69bacb..e2f1174665ea6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml @@ -152,6 +152,20 @@ setup: - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" } - match: { aggregations.histo.buckets.3.doc_count: 20 } +--- +"Empty aggregation": + + - do: + xpack.rollup.rollup_search: + index: "foo_rollup" + body: + size: 0 + aggs: {} + + - length: { hits.hits: 0 } + - match: { hits.total: 0 } + - is_false: aggregations + --- "Search with Metric": diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml new file mode 100644 index 0000000000000..57bfd821ea24d --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml @@ -0,0 +1,343 @@ +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + + +--- +teardown: + - do: + xpack.security.delete_user: + username: "test_user" + ignore: 404 + + - do: + xpack.security.delete_role: + name: "foo_only_access" + ignore: 404 + +--- +"Index-based access": + + - do: + xpack.security.put_role: + name: "foo_only_access" + body: > + { + "cluster": [ "all" ], + "indices": [ + { "names": ["foo"], "privileges": ["all"] }, + { "names": 
["rollup"], "privileges": ["all"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "foo_only_access" ], + "full_name" : "foo only" + } + + - do: + indices.create: + index: foo + body: + mappings: + _doc: + properties: + timestamp: + type: date + value_field: + type: integer + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: foo + type: _doc + body: + timestamp: 123 + value_field: 1232 + + - do: + indices.create: + index: foobar + body: + mappings: + _doc: + properties: + timestamp: + type: date + value_field: + type: integer + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: foobar + type: _doc + body: + timestamp: 123 + value_field: 456 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.refresh: + index: foo + + # This index pattern will match both indices, but we only have permission to read one + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo*", + "rollup_index": "rollup", + "cron": "*/1 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1s" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + + - is_true: acknowledged + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.start_job: + id: foo + - is_true: started + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.refresh: + index: rollup + + # this is a hacky way to sleep for 5s, since we will never have 10 nodes + - do: + catch: request_timeout + cluster.health: + wait_for_nodes: 10 + timeout: "5s" + - match: + timed_out: true + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.get_jobs: + id: foo + - match: + jobs.0.stats.documents_processed: 1 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: foo + body: + query: + match_all: {} + + - match: + hits.total: 1 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: rollup + body: + query: + match_all: {} + + - match: + hits.total: 1 + - match: + hits.hits.0._id: "foo$VxMkzTqILshClbtbFi4-rQ" + - match: + hits.hits.0._source: + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram.timestamp: 0 + value_field.max.value: 1232.0 + _rollup.version: 2 + timestamp.date_histogram.interval: "1s" + value_field.sum.value: 1232.0 + value_field.min.value: 1232.0 + timestamp.date_histogram._count: 1 + _rollup.id: "foo" + + +--- +"Attribute-based access": + + - do: + xpack.security.put_role: + name: "foo_only_access" + body: > + { + "cluster": [ "all" ], + "indices": [ + { + "names": ["foo"], + "privileges": ["all"], + "query": { + "template": { + "source": "{\"bool\":{\"filter\":[{\"term\":{\"visibility\":\"public\"}}]}}" + } + } + }, + { "names": ["rollup"], "privileges": ["all"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + 
"password" : "x-pack-test-password", + "roles" : [ "foo_only_access" ], + "full_name" : "foo only" + } + + - do: + indices.create: + index: foo + body: + mappings: + _doc: + properties: + timestamp: + type: date + value_field: + type: integer + visibility: + type: keyword + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: foo + type: _doc + body: + timestamp: 123 + value_field: 1232 + visibility: "public" + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: foobar + type: _doc + body: + timestamp: 123 + value_field: 456 + visibility: "private" + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.refresh: + index: foo + + # Index contains two docs, but we should only be able to see one of them + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "rollup", + "cron": "*/1 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1s" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + - is_true: acknowledged + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.start_job: + id: foo + - is_true: started + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.refresh: + index: rollup + + # this is a hacky way to sleep for 5s, since we will never have 10 nodes + - do: + catch: request_timeout + cluster.health: + wait_for_nodes: 10 + timeout: "5s" + - match: + timed_out: true + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + xpack.rollup.get_jobs: + id: foo + - match: + jobs.0.stats.documents_processed: 1 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: foo + body: + query: + match_all: {} + + - match: + hits.total: 1 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: rollup + body: + query: + match_all: {} + + - match: + hits.total: 1 + - match: + hits.hits.0._id: "foo$VxMkzTqILshClbtbFi4-rQ" + - match: + hits.hits.0._source: + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram.timestamp: 0 + value_field.max.value: 1232.0 + _rollup.version: 2 + timestamp.date_histogram.interval: "1s" + value_field.sum.value: 1232.0 + value_field.min.value: 1232.0 + timestamp.date_histogram._count: 1 + _rollup.id: "foo" diff --git a/x-pack/plugin/upgrade/build.gradle b/x-pack/plugin/upgrade/build.gradle index f95cde7134c56..56ce274dd1166 100644 --- a/x-pack/plugin/upgrade/build.gradle +++ b/x-pack/plugin/upgrade/build.gradle @@ -14,7 +14,8 @@ esplugin { archivesBaseName = 'x-pack-upgrade' dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git 
a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java index 07017e6fc0014..ad0ebd6815f2d 100644 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java @@ -79,7 +79,7 @@ private UpgradeActionRequired upgradeInfo(IndexMetaData indexMetaData, String in } } // Catch all check for all indices that didn't match the specific checks - if (indexMetaData.getCreationVersion().before(Version.V_5_0_0)) { + if (indexMetaData.getCreationVersion().before(Version.V_6_0_0)) { return UpgradeActionRequired.REINDEX; } else { return null; diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java index 568397e37395a..e454ac4a0140b 100644 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java @@ -44,7 +44,7 @@ public class Upgrade extends Plugin implements ActionPlugin { - public static final Version UPGRADE_INTRODUCED = Version.V_5_6_0; + public static final Version UPGRADE_INTRODUCED = Version.CURRENT.minimumCompatibilityVersion(); private final Settings settings; private final List> upgradeCheckFactories; diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java index 5939777572b48..f980450c07f7c 100644 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java @@ -166,7 +166,7 @@ public static IndexMetaData newTestIndexMeta(String name, String alias, Settings .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_CREATION_DATE, 1) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_5_0_0_beta1) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_6_0_0) .put(indexSettings) .build(); IndexMetaData.Builder builder = IndexMetaData.builder(name).settings(build); diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java index cd83803d1884c..71e3348b058b6 100644 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java @@ -206,9 +206,9 @@ private ClusterState withRandomOldNode() { DiscoveryNode node = discoveryNodes.get(nodeId); DiscoveryNode newNode = new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), node.getHostAddress(), node.getAddress(), node.getAttributes(), node.getRoles(), - randomVersionBetween(random(), Version.V_5_0_0, Version.V_5_4_0)); + randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0)); return ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(discoveryNodes).remove(node).add(newNode)).build(); } -} \ No newline at end of file +} diff --git 
a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle index a0feab6746359..3a9d759c46d12 100644 --- a/x-pack/plugin/watcher/build.gradle +++ b/x-pack/plugin/watcher/build.gradle @@ -25,7 +25,8 @@ dependencyLicenses { dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') compileOnly project(path: ':modules:transport-netty4', configuration: 'runtime') compileOnly project(path: ':plugins:transport-nio', configuration: 'runtime') diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java index 33b1217895dca..d22d402aa157e 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java @@ -61,14 +61,18 @@ void execute(Terminal terminal, String expression, int count) throws Exception { Cron cron = new Cron(expression); long time = date.getMillis(); + for (int i = 0; i < count; i++) { long prevTime = time; time = cron.getNextValidTimeAfter(time); if (time < 0) { - throw new UserException(ExitCodes.OK, (i + 1) + ".\t Could not compute future times since [" - + formatter.print(prevTime) + "] " + "(perhaps the cron expression only points to times in the past?)"); + if (i == 0) { + throw new UserException(ExitCodes.OK, "Could not compute future times since [" + + formatter.print(prevTime) + "] " + "(perhaps the cron expression only points to times in the past?)"); + } + break; } - terminal.println((i+1) + ".\t" + formatter.print(time)); + terminal.println((i + 1) + ".\t" + formatter.print(time)); } } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java index 2238842494817..f1e864d547c83 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java @@ -8,6 +8,13 @@ import org.elasticsearch.cli.Command; import org.elasticsearch.cli.CommandTestCase; +import java.util.Calendar; +import java.util.Locale; +import java.util.TimeZone; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; + public class CronEvalToolTests extends CommandTestCase { @Override protected Command newCommand() { @@ -18,6 +25,27 @@ public void testParse() throws Exception { String countOption = randomBoolean() ? 
"-c" : "--count"; int count = randomIntBetween(1, 100); String output = execute(countOption, Integer.toString(count), "0 0 0 1-6 * ?"); - assertTrue(output, output.contains("Here are the next " + count + " times this cron expression will trigger")); + assertThat(output, containsString("Here are the next " + count + " times this cron expression will trigger")); + } + + public void testGetNextValidTimes() throws Exception { + final int year = Calendar.getInstance(TimeZone.getTimeZone("UTC"), Locale.ROOT).get(Calendar.YEAR) + 1; + { + String output = execute("0 3 23 8 9 ? " + year); + assertThat(output, containsString("Here are the next 10 times this cron expression will trigger:")); + assertThat(output, not(containsString("ERROR"))); + assertThat(output, not(containsString("2.\t"))); + } + { + String output = execute("0 3 23 */4 9 ? " + year); + assertThat(output, containsString("Here are the next 10 times this cron expression will trigger:")); + assertThat(output, not(containsString("ERROR"))); + } + { + Exception expectThrows = expectThrows(Exception.class, () -> execute("0 3 23 */4 9 ? 2017")); + String message = expectThrows.getMessage(); + assertThat(message, containsString("Could not compute future times since")); + assertThat(message, containsString("(perhaps the cron expression only points to times in the past?)")); + } } } diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java index 3b9032f092185..1d3e51c11e027 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.protocol.xpack; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; @@ -412,8 +411,7 @@ public FeatureSet(String name, @Nullable String description, boolean available, } public FeatureSet(StreamInput in) throws IOException { - this(in.readString(), in.readOptionalString(), in.readBoolean(), in.readBoolean(), - in.getVersion().onOrAfter(Version.V_5_4_0) ? in.readMap() : null); + this(in.readString(), in.readOptionalString(), in.readBoolean(), in.readBoolean(), in.readMap()); } @Override @@ -422,9 +420,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(description); out.writeBoolean(available); out.writeBoolean(enabled); - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeMap(nativeCodeInfo); - } + out.writeMap(nativeCodeInfo); } public String name() { diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java new file mode 100644 index 0000000000000..455434f7ac4a9 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java @@ -0,0 +1,229 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import com.carrotsearch.hppc.ObjectIntHashMap; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent.Params; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * A Connection links exactly two {@link Vertex} objects. The basis of a + * connection is that one or more documents have been found that contain + * this pair of terms, and the strength of the connection is recorded + * as a weight. + */ +public class Connection { + private Vertex from; + private Vertex to; + private double weight; + private long docCount; + + public Connection(Vertex from, Vertex to, double weight, long docCount) { + this.from = from; + this.to = to; + this.weight = weight; + this.docCount = docCount; + } + + public Connection(StreamInput in, Map<VertexId, Vertex> vertices) throws IOException { + from = vertices.get(new VertexId(in.readString(), in.readString())); + to = vertices.get(new VertexId(in.readString(), in.readString())); + weight = in.readDouble(); + docCount = in.readVLong(); + } + + Connection() { + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(from.getField()); + out.writeString(from.getTerm()); + out.writeString(to.getField()); + out.writeString(to.getTerm()); + out.writeDouble(weight); + out.writeVLong(docCount); + } + + public ConnectionId getId() { + return new ConnectionId(from.getId(), to.getId()); + } + + public Vertex getFrom() { + return from; + } + + public Vertex getTo() { + return to; + } + + /** + * @return a measure of the relative connectedness between a pair of {@link Vertex} objects + */ + public double getWeight() { + return weight; + } + + /** + * @return the number of documents in the sampled set that contained this + * pair of {@link Vertex} objects. 
+ */ + public long getDocCount() { + return docCount; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Connection other = (Connection) obj; + return docCount == other.docCount && + weight == other.weight && + Objects.equals(to, other.to) && + Objects.equals(from, other.from); + } + + @Override + public int hashCode() { + return Objects.hash(docCount, weight, from, to); + } + + + private static final ParseField SOURCE = new ParseField("source"); + private static final ParseField TARGET = new ParseField("target"); + private static final ParseField WEIGHT = new ParseField("weight"); + private static final ParseField DOC_COUNT = new ParseField("doc_count"); + + + void toXContent(XContentBuilder builder, Params params, ObjectIntHashMap<Vertex> vertexNumbers) throws IOException { + builder.field(SOURCE.getPreferredName(), vertexNumbers.get(from)); + builder.field(TARGET.getPreferredName(), vertexNumbers.get(to)); + builder.field(WEIGHT.getPreferredName(), weight); + builder.field(DOC_COUNT.getPreferredName(), docCount); + } + + //When deserializing from XContent we need to wait for all vertices to be loaded before + // Connection objects can be created that reference them. This class provides the interim + // state for connections. + static class UnresolvedConnection { + int fromIndex; + int toIndex; + double weight; + long docCount; + UnresolvedConnection(int fromIndex, int toIndex, double weight, long docCount) { + super(); + this.fromIndex = fromIndex; + this.toIndex = toIndex; + this.weight = weight; + this.docCount = docCount; + } + public Connection resolve(List<Vertex> vertices) { + return new Connection(vertices.get(fromIndex), vertices.get(toIndex), weight, docCount); + } + + private static final ConstructingObjectParser<UnresolvedConnection, Void> PARSER = new ConstructingObjectParser<>( + "ConnectionParser", true, + args -> { + int source = (Integer) args[0]; + int target = (Integer) args[1]; + double weight = (Double) args[2]; + long docCount = (Long) args[3]; + return new UnresolvedConnection(source, target, weight, docCount); + }); + + static { + PARSER.declareInt(constructorArg(), SOURCE); + PARSER.declareInt(constructorArg(), TARGET); + PARSER.declareDouble(constructorArg(), WEIGHT); + PARSER.declareLong(constructorArg(), DOC_COUNT); + } + static UnresolvedConnection fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + } + + + /** + * An identifier (implements hashcode and equals) that represents a + * unique key for a {@link Connection} + */ + public static class ConnectionId { + private final VertexId source; + private final VertexId target; + + public ConnectionId(VertexId source, VertexId target) { + this.source = source; + this.target = target; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + ConnectionId vertexId = (ConnectionId) o; + + if (source != null ? !source.equals(vertexId.source) : vertexId.source != null) + return false; + if (target != null ? !target.equals(vertexId.target) : vertexId.target != null) + return false; + + return true; + } + + @Override + public int hashCode() { + int result = source != null ? source.hashCode() : 0; + result = 31 * result + (target != null ? 
target.hashCode() : 0); + return result; + } + + public VertexId getSource() { + return source; + } + + public VertexId getTarget() { + return target; + } + + @Override + public String toString() { + return getSource() + "->" + getTarget(); + } + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java new file mode 100644 index 0000000000000..495ea5fd28ac3 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java @@ -0,0 +1,401 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; + +/** + * Holds the criteria required to guide the exploration of connected terms which + * can be returned as a graph. 
+ */ +public class GraphExploreRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContentObject { + + public static final String NO_HOPS_ERROR_MESSAGE = "Graph explore request must have at least one hop"; + public static final String NO_VERTICES_ERROR_MESSAGE = "Graph explore hop must have at least one VertexRequest"; + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false); + private String[] types = Strings.EMPTY_ARRAY; + private String routing; + private TimeValue timeout; + + private int sampleSize = SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE; + private String sampleDiversityField; + private int maxDocsPerDiversityValue; + private boolean useSignificance = true; + private boolean returnDetailedInfo; + + private List<Hop> hops = new ArrayList<>(); + + public GraphExploreRequest() { + } + + /** + * Constructs a new graph request to run against the provided indices. No + * indices means it will run against all indices. + */ + public GraphExploreRequest(String... indices) { + this.indices = indices; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (hops.size() == 0) { + validationException = ValidateActions.addValidationError(NO_HOPS_ERROR_MESSAGE, validationException); + } + for (Hop hop : hops) { + validationException = hop.validate(validationException); + } + return validationException; + } + + @Override + public String[] indices() { + return this.indices; + } + + @Override + public GraphExploreRequest indices(String... indices) { + this.indices = indices; + return this; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public GraphExploreRequest indicesOptions(IndicesOptions indicesOptions) { + if (indicesOptions == null) { + throw new IllegalArgumentException("IndicesOptions must not be null"); + } + this.indicesOptions = indicesOptions; + return this; + } + + public String[] types() { + return this.types; + } + + public GraphExploreRequest types(String... types) { + this.types = types; + return this; + } + + public String routing() { + return this.routing; + } + + public GraphExploreRequest routing(String routing) { + this.routing = routing; + return this; + } + + public GraphExploreRequest routing(String... routings) { + this.routing = Strings.arrayToCommaDelimitedString(routings); + return this; + } + + public TimeValue timeout() { + return timeout; + } + + /** + * Graph exploration can be set to timeout after the given period. Search + * operations involved in each hop are limited to the remaining time + * available but can still overrun due to the nature of their "best efforts" + * timeout support. When a timeout occurs partial results are returned. 
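Since the methods above are plain builder-style setters, putting a time-bounded request together is short. A usage sketch (the index name is illustrative):

```java
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;

public class GraphTimeoutDemo {
    public static void main(String[] args) {
        GraphExploreRequest request = new GraphExploreRequest("my-index");
        // Cap the total exploration time; on expiry partial results come back.
        request.timeout(TimeValue.timeValueSeconds(5));
        // The string overload parses via TimeValue.parseTimeValue.
        request.timeout("5s");
        System.out.println(request); // graph explore [[my-index]][[]]
    }
}
```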
+ * + * @param timeout + * a {@link TimeValue} object which determines the maximum length + * of time to spend exploring + */ + public GraphExploreRequest timeout(TimeValue timeout) { + if (timeout == null) { + throw new IllegalArgumentException("timeout must not be null"); + } + this.timeout = timeout; + return this; + } + + public GraphExploreRequest timeout(String timeout) { + timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout")); + return this; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + types = in.readStringArray(); + routing = in.readOptionalString(); + timeout = in.readOptionalTimeValue(); + sampleSize = in.readInt(); + sampleDiversityField = in.readOptionalString(); + maxDocsPerDiversityValue = in.readInt(); + + useSignificance = in.readBoolean(); + returnDetailedInfo = in.readBoolean(); + + int numHops = in.readInt(); + Hop parentHop = null; + for (int i = 0; i < numHops; i++) { + Hop hop = new Hop(parentHop); + hop.readFrom(in); + hops.add(hop); + parentHop = hop; + } + + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + out.writeStringArray(types); + out.writeOptionalString(routing); + out.writeOptionalTimeValue(timeout); + + out.writeInt(sampleSize); + out.writeOptionalString(sampleDiversityField); + out.writeInt(maxDocsPerDiversityValue); + + out.writeBoolean(useSignificance); + out.writeBoolean(returnDetailedInfo); + out.writeInt(hops.size()); + for (Iterator<Hop> iterator = hops.iterator(); iterator.hasNext();) { + Hop hop = iterator.next(); + hop.writeTo(out); + } + } + + @Override + public String toString() { + return "graph explore [" + Arrays.toString(indices) + "][" + Arrays.toString(types) + "]"; + } + + /** + * The number of top-matching documents that are considered during each hop + * (default is {@link SamplerAggregationBuilder#DEFAULT_SHARD_SAMPLE_SIZE}). + * Very small values (less than 50) may not provide sufficient + * weight-of-evidence to identify significant connections between terms. + * <p>
+ * Very large values (many thousands) are not recommended with loosely + * defined queries (fuzzy queries or those with many OR clauses). This is + * because any useful signals in the best documents are diluted with + * irrelevant noise from low-quality matches. Performance is also typically + * better with smaller samples as there are fewer look-ups required for + * background frequencies of terms found in the documents. + * <p>
+ * + * @param maxNumberOfDocsPerHop + * shard-level sample size in documents + */ + public void sampleSize(int maxNumberOfDocsPerHop) { + sampleSize = maxNumberOfDocsPerHop; + } + + public int sampleSize() { + return sampleSize; + } + + /** + * Optional choice of single-value field on which to diversify sampled + * search results + */ + public void sampleDiversityField(String name) { + sampleDiversityField = name; + } + + public String sampleDiversityField() { + return sampleDiversityField; + } + + /** + * Optional number of permitted docs with the same value in sampled search + * results. Must also declare the diversifying field using sampleDiversityField + */ + public void maxDocsPerDiversityValue(int maxDocs) { + this.maxDocsPerDiversityValue = maxDocs; + } + + public int maxDocsPerDiversityValue() { + return maxDocsPerDiversityValue; + } + + /** + * Controls the choice of algorithm used to select interesting terms. The + * default value is true which means terms are selected based on + * significance (see the {@link SignificantTerms} aggregation) rather than + * popularity (using the {@link TermsAggregator}). + * + * @param value + * true if the significant_terms algorithm should be used. + */ + public void useSignificance(boolean value) { + this.useSignificance = value; + } + + public boolean useSignificance() { + return useSignificance; + } + + /** + * Return detailed information about vertex frequencies as part of JSON + * results - defaults to false + * + * @param value + * true if detailed information is required in JSON responses + */ + public void returnDetailedInfo(boolean value) { + this.returnDetailedInfo = value; + } + + public boolean returnDetailedInfo() { + return returnDetailedInfo; + } + + /** + * Add a stage in the graph exploration. Each hop represents a stage of + * querying Elasticsearch to identify terms which can then be connected to + * other terms in a subsequent hop. 
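Taken together, the sampling and significance controls above tune the evidence each hop collects; a usage sketch (index and field names are illustrative):

```java
import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest;

public class GraphSamplingDemo {
    public static void main(String[] args) {
        GraphExploreRequest request = new GraphExploreRequest("logs");
        // A modest per-shard sample: enough weight-of-evidence for the
        // significance heuristics without diluting it with weak matches.
        request.sampleSize(500);
        // Allow at most 2 docs per distinct author, so one prolific source
        // cannot dominate the sample.
        request.sampleDiversityField("author");
        request.maxDocsPerDiversityValue(2);
        // Rank connected terms by significance rather than raw popularity.
        request.useSignificance(true);
        System.out.println(request.sampleSize()); // 500
    }
}
```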
+ * + * @param guidingQuery + * optional choice of query which influences which documents are + * considered in this stage + * @return a {@link Hop} object that holds settings for a stage in the graph + * exploration + */ + public Hop createNextHop(QueryBuilder guidingQuery) { + Hop parent = null; + if (hops.size() > 0) { + parent = hops.get(hops.size() - 1); + } + Hop newHop = new Hop(parent); + newHop.guidingQuery = guidingQuery; + hops.add(newHop); + return newHop; + } + + public int getHopNumbers() { + return hops.size(); + } + + public Hop getHop(int hopNumber) { + return hops.get(hopNumber); + } + + public static class TermBoost { + String term; + float boost; + + public TermBoost(String term, float boost) { + super(); + this.term = term; + if (boost <= 0) { + throw new IllegalArgumentException("Boosts must be a positive non-zero number"); + } + this.boost = boost; + } + + TermBoost() { + } + + public String getTerm() { + return term; + } + + public float getBoost() { + return boost; + } + + void readFrom(StreamInput in) throws IOException { + this.term = in.readString(); + this.boost = in.readFloat(); + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(term); + out.writeFloat(boost); + } + + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.startObject("controls"); + { + if (sampleSize != SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE) { + builder.field("sample_size", sampleSize); + } + if (sampleDiversityField != null) { + builder.startObject("sample_diversity"); + builder.field("field", sampleDiversityField); + builder.field("max_docs_per_value", maxDocsPerDiversityValue); + builder.endObject(); + } + builder.field("use_significance", useSignificance); + if (returnDetailedInfo) { + builder.field("return_detailed_stats", returnDetailedInfo); + } + } + builder.endObject(); + + for (Hop hop : hops) { + if (hop.parentHop != null) { + builder.startObject("connections"); + } + hop.toXContent(builder, params); + } + for (Hop hop : hops) { + if (hop.parentHop != null) { + builder.endObject(); + } + } + builder.endObject(); + + return builder; + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java new file mode 100644 index 0000000000000..baaaedf0163ed --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java @@ -0,0 +1,261 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.protocol.xpack.graph; + +import com.carrotsearch.hppc.ObjectIntHashMap; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; +import org.elasticsearch.protocol.xpack.graph.Connection.UnresolvedConnection; +import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Graph explore response holds a graph of {@link Vertex} and {@link Connection} objects + * (nodes and edges in common graph parlance). + * + * @see GraphExploreRequest + */ +public class GraphExploreResponse extends ActionResponse implements ToXContentObject { + + private long tookInMillis; + private boolean timedOut = false; + private ShardOperationFailedException[] shardFailures = ShardSearchFailure.EMPTY_ARRAY; + private Map vertices; + private Map connections; + private boolean returnDetailedInfo; + static final String RETURN_DETAILED_INFO_PARAM = "returnDetailedInfo"; + + public GraphExploreResponse() { + } + + public GraphExploreResponse(long tookInMillis, boolean timedOut, ShardOperationFailedException[] shardFailures, + Map vertices, Map connections, boolean returnDetailedInfo) { + this.tookInMillis = tookInMillis; + this.timedOut = timedOut; + this.shardFailures = shardFailures; + this.vertices = vertices; + this.connections = connections; + this.returnDetailedInfo = returnDetailedInfo; + } + + + public TimeValue getTook() { + return new TimeValue(tookInMillis); + } + + public long getTookInMillis() { + return tookInMillis; + } + + /** + * @return true if the time stated in {@link GraphExploreRequest#timeout(TimeValue)} was exceeded + * (not all hops may have been completed in this case) + */ + public boolean isTimedOut() { + return this.timedOut; + } + public ShardOperationFailedException[] getShardFailures() { + return shardFailures; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + tookInMillis = in.readVLong(); + timedOut = in.readBoolean(); + + int size = in.readVInt(); + if (size == 0) { + shardFailures = ShardSearchFailure.EMPTY_ARRAY; + } else { + shardFailures = new ShardSearchFailure[size]; + for (int i = 0; i < shardFailures.length; i++) { + shardFailures[i] = readShardSearchFailure(in); + } + } + // read vertices + size = in.readVInt(); + vertices = new HashMap<>(); + for (int i = 0; i < size; i++) { + Vertex n = Vertex.readFrom(in); + vertices.put(n.getId(), n); + } + + size = in.readVInt(); + + connections = new HashMap<>(); + for (int i = 0; i < size; i++) { + 
Connection e = new Connection(in, vertices); + connections.put(e.getId(), e); + } + + returnDetailedInfo = in.readBoolean(); + + } + + public Collection getConnections() { + return connections.values(); + } + + public Collection getVertices() { + return vertices.values(); + } + + public Vertex getVertex(VertexId id) { + return vertices.get(id); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(tookInMillis); + out.writeBoolean(timedOut); + + out.writeVInt(shardFailures.length); + for (ShardOperationFailedException shardSearchFailure : shardFailures) { + shardSearchFailure.writeTo(out); + } + + out.writeVInt(vertices.size()); + for (Vertex vertex : vertices.values()) { + vertex.writeTo(out); + } + + out.writeVInt(connections.size()); + for (Connection connection : connections.values()) { + connection.writeTo(out); + } + + out.writeBoolean(returnDetailedInfo); + + } + + private static final ParseField TOOK = new ParseField("took"); + private static final ParseField TIMED_OUT = new ParseField("timed_out"); + private static final ParseField VERTICES = new ParseField("vertices"); + private static final ParseField CONNECTIONS = new ParseField("connections"); + private static final ParseField FAILURES = new ParseField("failures"); + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(TOOK.getPreferredName(), tookInMillis); + builder.field(TIMED_OUT.getPreferredName(), timedOut); + + builder.startArray(FAILURES.getPreferredName()); + if (shardFailures != null) { + for (ShardOperationFailedException shardFailure : shardFailures) { + builder.startObject(); + shardFailure.toXContent(builder, params); + builder.endObject(); + } + } + builder.endArray(); + + ObjectIntHashMap vertexNumbers = new ObjectIntHashMap<>(vertices.size()); + + Map extraParams = new HashMap<>(); + extraParams.put(RETURN_DETAILED_INFO_PARAM, Boolean.toString(returnDetailedInfo)); + Params extendedParams = new DelegatingMapParams(extraParams, params); + + builder.startArray(VERTICES.getPreferredName()); + for (Vertex vertex : vertices.values()) { + builder.startObject(); + vertexNumbers.put(vertex, vertexNumbers.size()); + vertex.toXContent(builder, extendedParams); + builder.endObject(); + } + builder.endArray(); + + builder.startArray(CONNECTIONS.getPreferredName()); + for (Connection connection : connections.values()) { + builder.startObject(); + connection.toXContent(builder, extendedParams, vertexNumbers); + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "GraphExploreResponsenParser", true, + args -> { + GraphExploreResponse result = new GraphExploreResponse(); + result.vertices = new HashMap<>(); + result.connections = new HashMap<>(); + + result.tookInMillis = (Long) args[0]; + result.timedOut = (Boolean) args[1]; + + @SuppressWarnings("unchecked") + List vertices = (List) args[2]; + @SuppressWarnings("unchecked") + List unresolvedConnections = (List) args[3]; + @SuppressWarnings("unchecked") + List failures = (List) args[4]; + for (Vertex vertex : vertices) { + // reverse-engineer if detailed stats were requested - + // mainly here for testing framework's equality tests + result.returnDetailedInfo = result.returnDetailedInfo || vertex.getFg() > 0; + result.vertices.put(vertex.getId(), vertex); + } + for (UnresolvedConnection 
unresolvedConnection : unresolvedConnections) { + Connection resolvedConnection = unresolvedConnection.resolve(vertices); + result.connections.put(resolvedConnection.getId(), resolvedConnection); + } + if (failures.size() > 0) { + result.shardFailures = failures.toArray(new ShardSearchFailure[failures.size()]); + } + return result; + }); + + static { + PARSER.declareLong(constructorArg(), TOOK); + PARSER.declareBoolean(constructorArg(), TIMED_OUT); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> Vertex.fromXContent(p), VERTICES); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> UnresolvedConnection.fromXContent(p), CONNECTIONS); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.fromXContent(p), FAILURES); + } + + public static GraphExploreResponse fromXContext(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java new file mode 100644 index 0000000000000..70ec61067f5b8 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * A Hop represents one of potentially many stages in a graph exploration. + * Each Hop identifies one or more fields in which it will attempt to find + * terms that are significantly connected to the previous Hop. Each field is identified + * using a {@link VertexRequest} + * + *

An example series of Hops on webserver logs would be: + * <ol> + * <li>an initial Hop to find the top ten IPAddresses trying to access urls + * containing the word "admin"</li> + * <li>a secondary Hop to see which other URLs those IPAddresses were trying + * to access</li> + * </ol> + * + * <p> + * Optionally, each hop can contain a "guiding query" that further limits the set of documents considered. + * In our weblog example above we might choose to constrain the second hop to only look at log records that + * had a response code of 404. + * </p> + * <p> + * If absent, the list of {@link VertexRequest}s is inherited from the prior Hop's list to avoid repeating + * the fields that will be examined at each stage. + * </p>
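A sketch of that weblog walk (not part of this diff; index and field names are hypothetical, and QueryBuilders is as imported by this file):

```java
GraphExploreRequest request = new GraphExploreRequest("web-logs");

// Hop 1: the top ten IP addresses hitting "admin" urls.
Hop hop1 = request.createNextHop(QueryBuilders.matchQuery("url", "admin"));
hop1.addVertexRequest("ip").size(10);

// Hop 2: which other urls those addresses touched, constrained to 404 responses.
Hop hop2 = request.createNextHop(QueryBuilders.termQuery("response", "404"));
hop2.addVertexRequest("url");
```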

+ * + */ +public class Hop implements ToXContentFragment{ + final Hop parentHop; + List vertices = null; + QueryBuilder guidingQuery = null; + + public Hop(Hop parent) { + this.parentHop = parent; + } + + public ActionRequestValidationException validate(ActionRequestValidationException validationException) { + + if (getEffectiveVertexRequests().size() == 0) { + validationException = ValidateActions.addValidationError(GraphExploreRequest.NO_VERTICES_ERROR_MESSAGE, validationException); + } + return validationException; + + } + + public Hop getParentHop() { + return parentHop; + } + + void writeTo(StreamOutput out) throws IOException { + out.writeOptionalNamedWriteable(guidingQuery); + if (vertices == null) { + out.writeVInt(0); + } else { + out.writeVInt(vertices.size()); + for (VertexRequest vr : vertices) { + vr.writeTo(out); + } + } + } + + void readFrom(StreamInput in) throws IOException { + guidingQuery = in.readOptionalNamedWriteable(QueryBuilder.class); + int size = in.readVInt(); + if (size > 0) { + vertices = new ArrayList<>(); + for (int i = 0; i < size; i++) { + VertexRequest vr = new VertexRequest(); + vr.readFrom(in); + vertices.add(vr); + } + } + } + + public QueryBuilder guidingQuery() { + if (guidingQuery != null) { + return guidingQuery; + } + return QueryBuilders.matchAllQuery(); + } + + /** + * Add a field in which this {@link Hop} will look for terms that are highly linked to + * previous hops and optionally the guiding query. + * + * @param fieldName a field in the chosen index + */ + public VertexRequest addVertexRequest(String fieldName) { + if (vertices == null) { + vertices = new ArrayList<>(); + } + VertexRequest vr = new VertexRequest(); + vr.fieldName(fieldName); + vertices.add(vr); + return vr; + } + + /** + * An optional parameter that focuses the exploration on documents that + * match the given query. + * + * @param queryBuilder any query + */ + public void guidingQuery(QueryBuilder queryBuilder) { + guidingQuery = queryBuilder; + } + + protected List getEffectiveVertexRequests() { + if (vertices != null) { + return vertices; + } + if (parentHop == null) { + return Collections.emptyList(); + } + // otherwise inherit settings from parent + return parentHop.getEffectiveVertexRequests(); + } + + public int getNumberVertexRequests() { + return getEffectiveVertexRequests().size(); + } + + public VertexRequest getVertexRequest(int requestNumber) { + return getEffectiveVertexRequests().get(requestNumber); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (guidingQuery != null) { + builder.field("query"); + guidingQuery.toXContent(builder, params); + } + if(vertices != null && vertices.size()>0) { + builder.startArray("vertices"); + for (VertexRequest vertexRequest : vertices) { + vertexRequest.toXContent(builder, params); + } + builder.endArray(); + } + return builder; + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java new file mode 100644 index 0000000000000..cfc26f44fac04 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java @@ -0,0 +1,268 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * A vertex in a graph response represents a single term (a field and value pair) + * which appears in one or more documents found as part of the graph exploration. + * + * A vertex term could be a bank account number, an email address, a hashtag or any + * other term that appears in documents and is interesting to represent in a network. + */ +public class Vertex implements ToXContentFragment { + + private final String field; + private final String term; + private double weight; + private final int depth; + private final long bg; + private long fg; + private static final ParseField FIELD = new ParseField("field"); + private static final ParseField TERM = new ParseField("term"); + private static final ParseField WEIGHT = new ParseField("weight"); + private static final ParseField DEPTH = new ParseField("depth"); + private static final ParseField FG = new ParseField("fg"); + private static final ParseField BG = new ParseField("bg"); + + + public Vertex(String field, String term, double weight, int depth, long bg, long fg) { + super(); + this.field = field; + this.term = term; + this.weight = weight; + this.depth = depth; + this.bg = bg; + this.fg = fg; + } + + static Vertex readFrom(StreamInput in) throws IOException { + return new Vertex(in.readString(), in.readString(), in.readDouble(), in.readVInt(), in.readVLong(), in.readVLong()); + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(field); + out.writeString(term); + out.writeDouble(weight); + out.writeVInt(depth); + out.writeVLong(bg); + out.writeVLong(fg); + } + + @Override + public int hashCode() { + return Objects.hash(field, term, weight, depth, bg, fg); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Vertex other = (Vertex) obj; + return depth == other.depth && + weight == other.weight && + bg == other.bg && + fg == other.fg && + Objects.equals(field, other.field) && + Objects.equals(term, other.term); + + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + boolean returnDetailedInfo = 
params.paramAsBoolean(GraphExploreResponse.RETURN_DETAILED_INFO_PARAM, false); + builder.field(FIELD.getPreferredName(), field); + builder.field(TERM.getPreferredName(), term); + builder.field(WEIGHT.getPreferredName(), weight); + builder.field(DEPTH.getPreferredName(), depth); + if (returnDetailedInfo) { + builder.field(FG.getPreferredName(), fg); + builder.field(BG.getPreferredName(), bg); + } + return builder; + } + + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "VertexParser", true, + args -> { + String field = (String) args[0]; + String term = (String) args[1]; + double weight = (Double) args[2]; + int depth = (Integer) args[3]; + Long optionalBg = (Long) args[4]; + Long optionalFg = (Long) args[5]; + long bg = optionalBg == null ? 0 : optionalBg; + long fg = optionalFg == null ? 0 : optionalFg; + return new Vertex(field, term, weight, depth, bg, fg); + }); + + static { + PARSER.declareString(constructorArg(), FIELD); + PARSER.declareString(constructorArg(), TERM); + PARSER.declareDouble(constructorArg(), WEIGHT); + PARSER.declareInt(constructorArg(), DEPTH); + PARSER.declareLong(optionalConstructorArg(), BG); + PARSER.declareLong(optionalConstructorArg(), FG); + } + + static Vertex fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + + + /** + * @return a {@link VertexId} object that uniquely identifies this Vertex + */ + public VertexId getId() { + return createId(field, term); + } + + /** + * A convenience method for creating a {@link VertexId} + * @param field the field + * @param term the term + * @return a {@link VertexId} that can be used for looking up vertices + */ + public static VertexId createId(String field, String term) { + return new VertexId(field,term); + } + + @Override + public String toString() { + return getId().toString(); + } + + public String getField() { + return field; + } + + public String getTerm() { + return term; + } + + /** + * The weight of a vertex is an accumulation of all of the {@link Connection}s + * that are linked to this {@link Vertex} as part of a graph exploration. + * It is used internally to identify the most interesting vertices to be returned. + * @return a measure of the {@link Vertex}'s relative importance. + */ + public double getWeight() { + return weight; + } + + public void setWeight(final double weight) { + this.weight = weight; + } + + /** + * If the {@link GraphExploreRequest#useSignificance(boolean)} is true (the default) + * this statistic is available. + * @return the number of documents in the index that contain this term (see bg_count in + *
+ * the significant_terms aggregation) + */ + public long getBg() { + return bg; + } + + /** + * If the {@link GraphExploreRequest#useSignificance(boolean)} is true (the default) + * this statistic is available. + * Together with {@link #getBg()} these numbers are used to derive the significance of a term. + * @return the number of documents in the sample of best matching documents that contain this term (see fg_count in + * + * the significant_terms aggregation) + */ + public long getFg() { + return fg; + } + + public void setFg(final long fg) { + this.fg = fg; + } + + /** + * @return the sequence number in the series of hops where this Vertex term was first encountered + */ + public int getHopDepth() { + return depth; + } + + /** + * An identifier (implements hashcode and equals) that represents a + * unique key for a {@link Vertex} + */ + public static class VertexId { + private final String field; + private final String term; + + public VertexId(String field, String term) { + this.field = field; + this.term = term; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + VertexId vertexId = (VertexId) o; + + if (field != null ? !field.equals(vertexId.field) : vertexId.field != null) + return false; + if (term != null ? !term.equals(vertexId.term) : vertexId.term != null) + return false; + + return true; + } + + @Override + public int hashCode() { + int result = field != null ? field.hashCode() : 0; + result = 31 * result + (term != null ? term.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return field + ":" + term; + } + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java new file mode 100644 index 0000000000000..116497fe2301c --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java @@ -0,0 +1,248 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.graph; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + * A request to identify terms from a choice of field as part of a {@link Hop}. 
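Once a response has been parsed, its graph can be walked with the Vertex accessors defined above. A small sketch (a hypothetical helper method, assuming the generic Collection of Vertex returned by getVertices()):

```java
static void printVertices(GraphExploreResponse response) {
    for (Vertex vertex : response.getVertices()) {
        System.out.printf("%s:%s depth=%d weight=%.2f%n",
                vertex.getField(), vertex.getTerm(),
                vertex.getHopDepth(), vertex.getWeight());
    }
}
```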
+ * Optionally, a set of terms can be provided that are used as an exclusion or + * inclusion list to filter which terms are considered. + * + */ +public class VertexRequest implements ToXContentObject { + private String fieldName; + private int size = DEFAULT_SIZE; + public static final int DEFAULT_SIZE = 5; + private Map includes; + private Set excludes; + public static final int DEFAULT_MIN_DOC_COUNT = 3; + private int minDocCount = DEFAULT_MIN_DOC_COUNT; + public static final int DEFAULT_SHARD_MIN_DOC_COUNT = 2; + private int shardMinDocCount = DEFAULT_SHARD_MIN_DOC_COUNT; + + + public VertexRequest() { + + } + + void readFrom(StreamInput in) throws IOException { + fieldName = in.readString(); + size = in.readVInt(); + minDocCount = in.readVInt(); + shardMinDocCount = in.readVInt(); + + int numIncludes = in.readVInt(); + if (numIncludes > 0) { + includes = new HashMap<>(); + for (int i = 0; i < numIncludes; i++) { + TermBoost tb = new TermBoost(); + tb.readFrom(in); + includes.put(tb.term, tb); + } + } + + int numExcludes = in.readVInt(); + if (numExcludes > 0) { + excludes = new HashSet<>(); + for (int i = 0; i < numExcludes; i++) { + excludes.add(in.readString()); + } + } + + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(fieldName); + out.writeVInt(size); + out.writeVInt(minDocCount); + out.writeVInt(shardMinDocCount); + + if (includes != null) { + out.writeVInt(includes.size()); + for (TermBoost tb : includes.values()) { + tb.writeTo(out); + } + } else { + out.writeVInt(0); + } + + if (excludes != null) { + out.writeVInt(excludes.size()); + for (String term : excludes) { + out.writeString(term); + } + } else { + out.writeVInt(0); + } + } + + public String fieldName() { + return fieldName; + } + + public VertexRequest fieldName(String fieldName) { + this.fieldName = fieldName; + return this; + } + + public int size() { + return size; + } + + /** + * @param size The maximum number of terms that should be returned from this field as part of this {@link Hop} + */ + public VertexRequest size(int size) { + this.size = size; + return this; + } + + public boolean hasIncludeClauses() { + return includes != null && includes.size() > 0; + } + + public boolean hasExcludeClauses() { + return excludes != null && excludes.size() > 0; + } + + /** + * Adds a term that should be excluded from results + * @param term A term to be excluded + */ + public void addExclude(String term) { + if (includes != null) { + throw new IllegalArgumentException("Cannot have both include and exclude clauses"); + } + if (excludes == null) { + excludes = new HashSet<>(); + } + excludes.add(term); + } + + /** + * Adds a term to the set of allowed values - the boost defines the relative + * importance when pursuing connections in subsequent {@link Hop}s. The boost value + * appears as part of the query. 
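The include/exclude guidance described above might be exercised like this (field names and terms are illustrative only; note that a single VertexRequest cannot mix the two kinds of clause):

```java
static void configureVertices(Hop hop) {
    VertexRequest urls = hop.addVertexRequest("url");
    urls.addInclude("/admin/login", 2.0f);  // boost steers subsequent hops
    urls.addInclude("/admin/users", 1.0f);

    // Exclusions must live on a separate VertexRequest/field.
    hop.addVertexRequest("ip").addExclude("127.0.0.1");
}
```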
+ * @param term a required term + * @param boost an optional boost + */ + public void addInclude(String term, float boost) { + if (excludes != null) { + throw new IllegalArgumentException("Cannot have both include and exclude clauses"); + } + if (includes == null) { + includes = new HashMap<>(); + } + includes.put(term, new TermBoost(term, boost)); + } + + public TermBoost[] includeValues() { + return includes.values().toArray(new TermBoost[includes.size()]); + } + + public String[] includeValuesAsStringArray() { + String[] result = new String[includes.size()]; + int i = 0; + for (TermBoost tb : includes.values()) { + result[i++] = tb.term; + } + return result; + } + + public String[] excludesAsArray() { + return excludes.toArray(new String[excludes.size()]); + } + + public int minDocCount() { + return minDocCount; + } + + /** + * A "certainty" threshold which defines the weight-of-evidence required before + * a term found in this field is identified as a useful connection + * + * @param value The minimum number of documents that contain this term found in the samples used across all shards + */ + public VertexRequest minDocCount(int value) { + minDocCount = value; + return this; + } + + + public int shardMinDocCount() { + return Math.min(shardMinDocCount, minDocCount); + } + + /** + * A "certainty" threshold which defines the weight-of-evidence required before + * a term found in this field is identified as a useful connection + * + * @param value The minimum number of documents that contain this term found in the samples used across all shards + */ + public VertexRequest shardMinDocCount(int value) { + shardMinDocCount = value; + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("field", fieldName); + if (size != DEFAULT_SIZE) { + builder.field("size", size); + } + if (minDocCount != DEFAULT_MIN_DOC_COUNT) { + builder.field("min_doc_count", minDocCount); + } + if (shardMinDocCount != DEFAULT_SHARD_MIN_DOC_COUNT) { + builder.field("shard_min_doc_count", shardMinDocCount); + } + if(includes!=null) { + builder.startArray("include"); + for (TermBoost tb : includes.values()) { + builder.startObject(); + builder.field("term", tb.term); + builder.field("boost", tb.boost); + builder.endObject(); + } + builder.endArray(); + } + if(excludes!=null) { + builder.startArray("exclude"); + for (String value : excludes) { + builder.value(value); + } + builder.endArray(); + } + builder.endObject(); + return builder; + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java new file mode 100644 index 0000000000000..f4f666074a118 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java @@ -0,0 +1,24 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + * Request and Response objects for the default distribution's Graph + * APIs. + */ +package org.elasticsearch.protocol.xpack.graph; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/AbstractResultResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/AbstractResultResponse.java new file mode 100644 index 0000000000000..64f350933c9c4 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/AbstractResultResponse.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * Abstract class that provides a list of results and their count. 
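Concrete responses later in this diff (for example GetBucketsResponse) follow a common pattern on top of this class. A minimal hypothetical subclass, assuming the class is generic over its result type; the Thing result type and all names here are illustrative, not part of the PR:

```java
public class GetThingsResponse extends AbstractResultResponse<Thing> {
    public static final ParseField THINGS = new ParseField("things");

    GetThingsResponse(List<Thing> things, long count) {
        super(THINGS, things, count);  // renders as {"count": N, "things": [...]}
    }

    public List<Thing> things() {
        return results;
    }
}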
+ */ +public abstract class AbstractResultResponse extends ActionResponse implements ToXContentObject { + + public static final ParseField COUNT = new ParseField("count"); + + private final ParseField resultsField; + protected final List results; + protected final long count; + + AbstractResultResponse(ParseField resultsField, List results, long count) { + this.resultsField = Objects.requireNonNull(resultsField, + "[results_field] must not be null"); + this.results = Collections.unmodifiableList(results); + this.count = count; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(COUNT.getPreferredName(), count); + builder.field(resultsField.getPreferredName(), results); + builder.endObject(); + return builder; + } + + public long count() { + return count; + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/CloseJobRequest.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/CloseJobRequest.java new file mode 100644 index 0000000000000..3d54bfb9488a9 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/CloseJobRequest.java @@ -0,0 +1,197 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.protocol.xpack.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.security.InvalidParameterException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +public class CloseJobRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField JOB_ID = new ParseField("job_id"); + public static final ParseField TIMEOUT = new ParseField("timeout"); + public static final ParseField FORCE = new ParseField("force"); + public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "close_job_request", + true, a -> new CloseJobRequest((List) a[0])); + + static { + PARSER.declareField(ConstructingObjectParser.constructorArg(), + p -> Arrays.asList(Strings.commaDelimitedListToStringArray(p.text())), + JOB_ID, ObjectParser.ValueType.STRING_ARRAY); + PARSER.declareString((obj, val) -> obj.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT); + PARSER.declareBoolean(CloseJobRequest::setForce, FORCE); + PARSER.declareBoolean(CloseJobRequest::setAllowNoJobs, ALLOW_NO_JOBS); + } + + private static final String ALL_JOBS = "_all"; + + private final List jobIds; + private TimeValue timeout; + private Boolean force; + private Boolean allowNoJobs; + + /** + * Explicitly close all jobs + * + * @return a {@link CloseJobRequest} for all existing jobs + */ + public static CloseJobRequest closeAllJobsRequest(){ + return new CloseJobRequest(ALL_JOBS); + } + + CloseJobRequest(List jobIds) { + if (jobIds.isEmpty()) { + throw new InvalidParameterException("jobIds must not be empty"); + } + if (jobIds.stream().anyMatch(Objects::isNull)) { + throw new NullPointerException("jobIds must not contain null values"); + } + this.jobIds = new ArrayList<>(jobIds); + } + + /** + * Close the specified Jobs via their unique jobIds + * + * @param jobIds must be non-null and non-empty and each jobId must be non-null + */ + public CloseJobRequest(String... jobIds) { + this(Arrays.asList(jobIds)); + } + + /** + * All the jobIds to be closed + */ + public List getJobIds() { + return jobIds; + } + + /** + * How long to wait for the close request to complete before timing out. + * + * Default: 30 minutes + */ + public TimeValue getTimeout() { + return timeout; + } + + /** + * {@link CloseJobRequest#getTimeout()} + */ + public void setTimeout(TimeValue timeout) { + this.timeout = timeout; + } + + /** + * Should the closing be forced. + * + * Use to close a failed job, or to forcefully close a job which has not responded to its initial close request. + */ + public Boolean isForce() { + return force; + } + + /** + * {@link CloseJobRequest#isForce()} + */ + public void setForce(boolean force) { + this.force = force; + } + + /** + * Whether to ignore if a wildcard expression matches no jobs. 
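A usage sketch for the request above (job ids are hypothetical):

```java
// Close two jobs, forcing if necessary, waiting up to ten minutes.
CloseJobRequest request = new CloseJobRequest("weblog-job", "metrics-job");
request.setTimeout(TimeValue.timeValueMinutes(10));
request.setForce(true);
request.setAllowNoJobs(true);  // tolerate wildcards that match nothing

// Or close every job in the cluster:
CloseJobRequest closeAll = CloseJobRequest.closeAllJobsRequest();
```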
+ * + * This includes `_all` string or when no jobs have been specified + */ + public Boolean isAllowNoJobs() { + return this.allowNoJobs; + } + + /** + * {@link CloseJobRequest#isAllowNoJobs()} + */ + public void setAllowNoJobs(boolean allowNoJobs) { + this.allowNoJobs = allowNoJobs; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return Objects.hash(jobIds, timeout, force, allowNoJobs); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + CloseJobRequest that = (CloseJobRequest) other; + return Objects.equals(jobIds, that.jobIds) && + Objects.equals(timeout, that.timeout) && + Objects.equals(force, that.force) && + Objects.equals(allowNoJobs, that.allowNoJobs); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(JOB_ID.getPreferredName(), Strings.collectionToCommaDelimitedString(jobIds)); + if (timeout != null) { + builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep()); + } + if (force != null) { + builder.field(FORCE.getPreferredName(), force); + } + if (allowNoJobs != null) { + builder.field(ALLOW_NO_JOBS.getPreferredName(), allowNoJobs); + } + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/CloseJobResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/CloseJobResponse.java new file mode 100644 index 0000000000000..9e1f38ef6bab7 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/CloseJobResponse.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.protocol.xpack.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class CloseJobResponse extends ActionResponse implements ToXContentObject { + + private static final ParseField CLOSED = new ParseField("closed"); + + public static final ObjectParser PARSER = + new ObjectParser<>("close_job_response", true, CloseJobResponse::new); + + static { + PARSER.declareBoolean(CloseJobResponse::setClosed, CLOSED); + } + + private boolean closed; + + CloseJobResponse() { + } + + public CloseJobResponse(boolean closed) { + this.closed = closed; + } + + public static CloseJobResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public boolean isClosed() { + return closed; + } + + public void setClosed(boolean closed) { + this.closed = closed; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + CloseJobResponse that = (CloseJobResponse) other; + return isClosed() == that.isClosed(); + } + + @Override + public int hashCode() { + return Objects.hash(isClosed()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(CLOSED.getPreferredName(), closed); + builder.endObject(); + return builder; + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/GetBucketsRequest.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/GetBucketsRequest.java new file mode 100644 index 0000000000000..4957f9b6ff6e7 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/GetBucketsRequest.java @@ -0,0 +1,268 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.protocol.xpack.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.protocol.xpack.ml.job.config.Job; +import org.elasticsearch.protocol.xpack.ml.job.results.Result; +import org.elasticsearch.protocol.xpack.ml.job.util.PageParams; + +import java.io.IOException; +import java.util.Objects; + +/** + * A request to retrieve buckets of a given job + */ +public class GetBucketsRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField EXPAND = new ParseField("expand"); + public static final ParseField EXCLUDE_INTERIM = new ParseField("exclude_interim"); + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField ANOMALY_SCORE = new ParseField("anomaly_score"); + public static final ParseField TIMESTAMP = new ParseField("timestamp"); + public static final ParseField SORT = new ParseField("sort"); + public static final ParseField DESCENDING = new ParseField("desc"); + + public static final ObjectParser PARSER = new ObjectParser<>("get_buckets_request", GetBucketsRequest::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareString(GetBucketsRequest::setTimestamp, Result.TIMESTAMP); + PARSER.declareBoolean(GetBucketsRequest::setExpand, EXPAND); + PARSER.declareBoolean(GetBucketsRequest::setExcludeInterim, EXCLUDE_INTERIM); + PARSER.declareStringOrNull(GetBucketsRequest::setStart, START); + PARSER.declareStringOrNull(GetBucketsRequest::setEnd, END); + PARSER.declareObject(GetBucketsRequest::setPageParams, PageParams.PARSER, PageParams.PAGE); + PARSER.declareDouble(GetBucketsRequest::setAnomalyScore, ANOMALY_SCORE); + PARSER.declareString(GetBucketsRequest::setSort, SORT); + PARSER.declareBoolean(GetBucketsRequest::setDescending, DESCENDING); + } + + private String jobId; + private String timestamp; + private Boolean expand; + private Boolean excludeInterim; + private String start; + private String end; + private PageParams pageParams; + private Double anomalyScore; + private String sort; + private Boolean descending; + + private GetBucketsRequest() {} + + /** + * Constructs a request to retrieve buckets of a given job + * @param jobId id of the job to retrieve buckets of + */ + public GetBucketsRequest(String jobId) { + this.jobId = Objects.requireNonNull(jobId); + } + + public String getJobId() { + return jobId; + } + + /** + * Sets the timestamp of a specific bucket to be retrieved. + * @param timestamp the timestamp of a specific bucket to be retrieved + */ + public void setTimestamp(String timestamp) { + this.timestamp = timestamp; + } + + public String getTimestamp() { + return timestamp; + } + + public boolean isExpand() { + return expand; + } + + /** + * Sets the value of "expand". + * When {@code true}, buckets will be expanded to include their records. + * @param expand value of "expand" to be set + */ + public void setExpand(boolean expand) { + this.expand = expand; + } + + public boolean isExcludeInterim() { + return excludeInterim; + } + + /** + * Sets the value of "exclude_interim". + * When {@code true}, interim buckets will be filtered out. 
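A sketch of the bucket query above (hypothetical job id and time range; PageParams is referenced but not shown in this diff, so its (from, size) constructor is an assumption):

```java
GetBucketsRequest request = new GetBucketsRequest("it-ops-kpi");
request.setStart("2018-08-01T00:00:00Z");  // on or after
request.setEnd("2018-09-01T00:00:00Z");    // strictly before
request.setExcludeInterim(true);           // final buckets only
request.setAnomalyScore(75.0);             // skip low-scoring buckets
request.setSort("anomaly_score");
request.setDescending(true);
request.setPageParams(new PageParams(0, 100));
```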
+ * @param excludeInterim value of "exclude_interim" to be set + */ + public void setExcludeInterim(boolean excludeInterim) { + this.excludeInterim = excludeInterim; + } + + public String getStart() { + return start; + } + + /** + * Sets the value of "start" which is a timestamp. + * Only buckets whose timestamp is on or after the "start" value will be returned. + * @param start value of "start" to be set + */ + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + /** + * Sets the value of "end" which is a timestamp. + * Only buckets whose timestamp is before the "end" value will be returned. + * @param end value of "end" to be set + */ + public void setEnd(String end) { + this.end = end; + } + + public PageParams getPageParams() { + return pageParams; + } + + /** + * Sets the paging parameters + * @param pageParams the paging parameters + */ + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + public Double getAnomalyScore() { + return anomalyScore; + } + + /** + * Sets the value of "anomaly_score". + * Only buckets with "anomaly_score" equal or greater will be returned. + * @param anomalyScore value of "anomaly_score". + */ + public void setAnomalyScore(double anomalyScore) { + this.anomalyScore = anomalyScore; + } + + public String getSort() { + return sort; + } + + /** + * Sets the value of "sort". + * Specifies the bucket field to sort on. + * @param sort value of "sort". + */ + public void setSort(String sort) { + this.sort = sort; + } + + public boolean isDescending() { + return descending; + } + + /** + * Sets the value of "desc". + * Specifies the sorting order. + * @param descending value of "desc" + */ + public void setDescending(boolean descending) { + this.descending = descending; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (timestamp != null) { + builder.field(Result.TIMESTAMP.getPreferredName(), timestamp); + } + if (expand != null) { + builder.field(EXPAND.getPreferredName(), expand); + } + if (excludeInterim != null) { + builder.field(EXCLUDE_INTERIM.getPreferredName(), excludeInterim); + } + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (pageParams != null) { + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + } + if (anomalyScore != null) { + builder.field(ANOMALY_SCORE.getPreferredName(), anomalyScore); + } + if (sort != null) { + builder.field(SORT.getPreferredName(), sort); + } + if (descending != null) { + builder.field(DESCENDING.getPreferredName(), descending); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, timestamp, expand, excludeInterim, anomalyScore, pageParams, start, end, sort, descending); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetBucketsRequest other = (GetBucketsRequest) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(timestamp, other.timestamp) && + Objects.equals(expand, other.expand) && + Objects.equals(excludeInterim, other.excludeInterim) && + Objects.equals(anomalyScore, other.anomalyScore) 
&& + Objects.equals(pageParams, other.pageParams) && + Objects.equals(start, other.start) && + Objects.equals(end, other.end) && + Objects.equals(sort, other.sort) && + Objects.equals(descending, other.descending); + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/GetBucketsResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/GetBucketsResponse.java new file mode 100644 index 0000000000000..4350661f68b33 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/GetBucketsResponse.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.ml; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.ml.job.results.Bucket; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * A response containing the requested buckets + */ +public class GetBucketsResponse extends AbstractResultResponse { + + public static final ParseField BUCKETS = new ParseField("buckets"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("get_buckets_response", + true, a -> new GetBucketsResponse((List) a[0], (long) a[1])); + + static { + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), Bucket.PARSER, BUCKETS); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), COUNT); + } + + public static GetBucketsResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + GetBucketsResponse(List buckets, long count) { + super(BUCKETS, buckets, count); + } + + /** + * The retrieved buckets + * @return the retrieved buckets + */ + public List buckets() { + return results; + } + + @Override + public int hashCode() { + return Objects.hash(count, results); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetBucketsResponse other = (GetBucketsResponse) obj; + return count == other.count && Objects.equals(results, other.results); + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/GetJobRequest.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/GetJobRequest.java new file mode 100644 index 0000000000000..b0377c86fdc78 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/GetJobRequest.java @@ -0,0 +1,148 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.protocol.xpack.ml;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * Request object to get {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} objects with the matching `jobId`s or
+ * `groupName`s.
+ *
+ * `_all` explicitly gets all the jobs in the cluster
+ * An empty request (no `jobId`s) implicitly gets all the jobs in the cluster
+ */
+public class GetJobRequest extends ActionRequest implements ToXContentObject {
+
+    public static final ParseField JOB_IDS = new ParseField("job_ids");
+    public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs");
+
+    private static final String ALL_JOBS = "_all";
+    private final List<String> jobIds;
+    private Boolean allowNoJobs;
+
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<GetJobRequest, Void> PARSER = new ConstructingObjectParser<>(
+        "get_job_request",
+        true, a -> new GetJobRequest(a[0] == null ? new ArrayList<>() : (List<String>) a[0]));
+
+    static {
+        PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), JOB_IDS);
+        PARSER.declareBoolean(GetJobRequest::setAllowNoJobs, ALLOW_NO_JOBS);
+    }
+
+    /**
+     * Helper method to create a query that will get ALL jobs
+     * @return new {@link GetJobRequest} object searching for the jobId "_all"
+     */
+    public static GetJobRequest getAllJobsRequest() {
+        return new GetJobRequest(ALL_JOBS);
+    }
+
+    /**
+     * Get the specified {@link org.elasticsearch.protocol.xpack.ml.job.config.Job} configurations via their unique jobIds
+     * @param jobIds must not contain any null values
+     */
+    public GetJobRequest(String... jobIds) {
+        this(Arrays.asList(jobIds));
+    }
+
+    GetJobRequest(List<String> jobIds) {
+        if (jobIds.stream().anyMatch(Objects::isNull)) {
+            throw new NullPointerException("jobIds must not contain null values");
+        }
+        this.jobIds = new ArrayList<>(jobIds);
+    }
+
+    /**
+     * All the jobIds for which to get configuration information
+     */
+    public List<String> getJobIds() {
+        return jobIds;
+    }
+
+    /**
+     * See {@link GetJobRequest#isAllowNoJobs()}
+     * @param allowNoJobs value of "allow_no_jobs" to be set
+     */
+    public void setAllowNoJobs(boolean allowNoJobs) {
+        this.allowNoJobs = allowNoJobs;
+    }
+
+    /**
+     * Whether to ignore if a wildcard expression matches no jobs.
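+     * This setting only matters when the request uses a wildcard expression or `_all` rather than explicit jobIds.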
+     *
+     * If this is `false`, then an error is returned when a wildcard (or `_all`) does not match any jobs.
+     */
+    public Boolean isAllowNoJobs() {
+        return allowNoJobs;
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        return null;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(jobIds, allowNoJobs);
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other == null || other.getClass() != getClass()) {
+            return false;
+        }
+
+        GetJobRequest that = (GetJobRequest) other;
+        return Objects.equals(jobIds, that.jobIds) &&
+            Objects.equals(allowNoJobs, that.allowNoJobs);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+
+        if (jobIds.isEmpty() == false) {
+            builder.field(JOB_IDS.getPreferredName(), jobIds);
+        }
+
+        if (allowNoJobs != null) {
+            builder.field(ALLOW_NO_JOBS.getPreferredName(), allowNoJobs);
+        }
+
+        builder.endObject();
+        return builder;
+    }
+}
diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/GetJobResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/GetJobResponse.java
new file mode 100644
index 0000000000000..4db542dc1526d
--- /dev/null
+++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/GetJobResponse.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.protocol.xpack.ml;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.protocol.xpack.ml.job.config.Job;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * Contains a {@link List} of the found {@link Job} objects and the total count found
+ */
+public class GetJobResponse extends AbstractResultResponse<Job> {
+
+    public static final ParseField RESULTS_FIELD = new ParseField("jobs");
+
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<GetJobResponse, Void> PARSER =
+        new ConstructingObjectParser<>("jobs_response", true,
+            a -> new GetJobResponse((List<Job.Builder>) a[0], (long) a[1]));
+
+    static {
+        PARSER.declareObjectArray(constructorArg(), Job.PARSER, RESULTS_FIELD);
+        PARSER.declareLong(constructorArg(), AbstractResultResponse.COUNT);
+    }
+
+    GetJobResponse(List<Job.Builder> jobBuilders, long count) {
+        super(RESULTS_FIELD, jobBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()), count);
+    }
+
+    /**
+     * The collection of {@link Job} objects found in the query
+     */
+    public List<Job> jobs() {
+        return results;
+    }
+
+    public static GetJobResponse fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(results, count);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+
+        GetJobResponse other = (GetJobResponse) obj;
+        return Objects.equals(results, other.results) && count == other.count;
+    }
+
+    @Override
+    public final String toString() {
+        return Strings.toString(this);
+    }
+}
diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobResponse.java
index b37bd35d6b17f..1bd9e87f6544c 100644
--- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobResponse.java
+++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/PutJobResponse.java
@@ -39,9 +39,6 @@ public PutJobResponse(Job job) {
         this.job = job;
     }
 
-    public PutJobResponse() {
-    }
-
     public Job getResponse() {
         return job;
     }
diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshot.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshot.java
index 2b9957f9bc756..ea5f016993101 100644
--- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshot.java
+++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/process/ModelSnapshot.java
@@ -221,10 +221,8 @@ public boolean equals(Object other) {
 
     public static class Builder {
         private String jobId;
-        // Stored snapshot documents created prior to 6.3.0 will have no
-        // value for min_version. We default it to 5.5.0 as there were
-        // no model changes between 5.5.0 and 6.3.0.
-        private Version minVersion = Version.V_5_5_0;
+        // Stored snapshot documents created prior to 6.3.0 will have no value for min_version.
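+        // Such documents are treated as requiring at least 6.3.0 when read back.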
+        private Version minVersion = Version.V_6_3_0;
 
         private Date timestamp;
         private String description;
diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/util/PageParams.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/util/PageParams.java
new file mode 100644
index 0000000000000..2e20e84d7b81b
--- /dev/null
+++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/util/PageParams.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.protocol.xpack.ml.job.util;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Objects;
+
+/**
+ * Paging parameters for GET requests
+ */
+public class PageParams implements ToXContentObject {
+
+    public static final ParseField PAGE = new ParseField("page");
+    public static final ParseField FROM = new ParseField("from");
+    public static final ParseField SIZE = new ParseField("size");
+
+    public static final ConstructingObjectParser<PageParams, Void> PARSER = new ConstructingObjectParser<>(PAGE.getPreferredName(),
+            a -> new PageParams((Integer) a[0], (Integer) a[1]));
+
+    static {
+        PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), FROM);
+        PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), SIZE);
+    }
+
+    private final Integer from;
+    private final Integer size;
+
+    /**
+     * Constructs paging parameters
+     * @param from skips the specified number of items. When {@code null} the default value will be used.
+     * @param size specifies the maximum number of items to obtain. When {@code null} the default value will be used.
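+     *             For example, {@code new PageParams(100, 50)} skips the first 100 items and returns at most the next 50.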
+     */
+    public PageParams(@Nullable Integer from, @Nullable Integer size) {
+        this.from = from;
+        this.size = size;
+    }
+
+    public Integer getFrom() {
+        return from;
+    }
+
+    public Integer getSize() {
+        return size;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        if (from != null) {
+            builder.field(FROM.getPreferredName(), from);
+        }
+        if (size != null) {
+            builder.field(SIZE.getPreferredName(), size);
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(from, size);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null) {
+            return false;
+        }
+        if (getClass() != obj.getClass()) {
+            return false;
+        }
+        PageParams other = (PageParams) obj;
+        return Objects.equals(from, other.from) &&
+                Objects.equals(size, other.size);
+    }
+
+}
diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/User.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/User.java
index 42e957ecf2d51..e08289e98215c 100644
--- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/User.java
+++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/User.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.protocol.xpack.security;
 
-import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.Strings;
@@ -199,12 +198,7 @@ public static User partialReadFrom(String username, StreamInput input) throws IO
         boolean hasInnerUser = input.readBoolean();
         if (hasInnerUser) {
             User innerUser = readFrom(input);
-            if (input.getVersion().onOrBefore(Version.V_5_4_0)) {
-                // backcompat: runas user was read first, so reverse outer and inner
-                return new User(innerUser, outerUser);
-            } else {
-                return new User(outerUser, innerUser);
-            }
+            return new User(outerUser, innerUser);
         } else {
             return outerUser;
         }
@@ -221,11 +215,6 @@ public static void writeTo(User user, StreamOutput output) throws IOException {
         if (user.authenticatedUser == null) {
             // no backcompat necessary, since there is no inner user
             writeUser(user, output);
-        } else if (output.getVersion().onOrBefore(Version.V_5_4_0)) {
-            // backcompat: write runas user as the "inner" user
-            writeUser(user.authenticatedUser, output);
-            output.writeBoolean(true);
-            writeUser(user, output);
         } else {
             writeUser(user, output);
             output.writeBoolean(true);
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java
new file mode 100644
index 0000000000000..0f8f055049be7
--- /dev/null
+++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.protocol.xpack.graph;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class GraphExploreResponseTests extends AbstractXContentTestCase<GraphExploreResponse> {
+
+    @Override
+    protected GraphExploreResponse createTestInstance() {
+        return createInstance(0);
+    }
+
+    private static GraphExploreResponse createInstance(int numFailures) {
+        int numItems = randomIntBetween(4, 128);
+        boolean timedOut = randomBoolean();
+        boolean showDetails = randomBoolean();
+        long overallTookInMillis = randomNonNegativeLong();
+        Map<Vertex.VertexId, Vertex> vertices = new HashMap<>();
+        Map<Connection.ConnectionId, Connection> connections = new HashMap<>();
+        ShardOperationFailedException[] failures = new ShardOperationFailedException[numFailures];
+        for (int i = 0; i < failures.length; i++) {
+            failures[i] = new ShardSearchFailure(new ElasticsearchException("an error"));
+        }
+
+        // Create random set of vertices
+        for (int i = 0; i < numItems; i++) {
+            Vertex v = new Vertex("field1", randomAlphaOfLength(5), randomDouble(), 0,
+                    showDetails ? randomIntBetween(100, 200) : 0,
+                    showDetails ? randomIntBetween(1, 100) : 0);
+            vertices.put(v.getId(), v);
+        }
+
+        // Wire up half the vertices randomly
+        Vertex[] vs = vertices.values().toArray(new Vertex[vertices.size()]);
+        for (int i = 0; i < numItems / 2; i++) {
+            Vertex v1 = vs[randomIntBetween(0, vs.length - 1)];
+            Vertex v2 = vs[randomIntBetween(0, vs.length - 1)];
+            if (v1 != v2) {
+                Connection conn = new Connection(v1, v2, randomDouble(), randomLongBetween(1, 10));
+                connections.put(conn.getId(), conn);
+            }
+        }
+        return new GraphExploreResponse(overallTookInMillis, timedOut, failures, vertices, connections, showDetails);
+    }
+
+    private static GraphExploreResponse createTestInstanceWithFailures() {
+        return createInstance(randomIntBetween(1, 128));
+    }
+
+    @Override
+    protected GraphExploreResponse doParseInstance(XContentParser parser) throws IOException {
+        return GraphExploreResponse.fromXContext(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    @Override
+    protected String[] getShuffleFieldsExceptions() {
+        return new String[]{"vertices"};
+    }
+
+    protected Predicate<String> getRandomFieldsExcludeFilterWhenResultHasErrors() {
+        return field -> field.startsWith("responses");
+    }
+
+    @Override
+    protected void assertEqualInstances(GraphExploreResponse expectedInstance, GraphExploreResponse newInstance) {
+        assertThat(newInstance.getTook(), equalTo(expectedInstance.getTook()));
+        assertThat(newInstance.isTimedOut(), equalTo(expectedInstance.isTimedOut()));
+
+        Connection[] newConns = newInstance.getConnections().toArray(new Connection[0]);
+        Connection[] expectedConns = expectedInstance.getConnections().toArray(new Connection[0]);
+        assertArrayEquals(expectedConns, newConns);
+
+        Vertex[] newVertices = newInstance.getVertices().toArray(new Vertex[0]);
+        Vertex[] expectedVertices = expectedInstance.getVertices().toArray(new Vertex[0]);
+        assertArrayEquals(expectedVertices, newVertices);
+
+        ShardOperationFailedException[] newFailures = newInstance.getShardFailures();
+        ShardOperationFailedException[] expectedFailures = expectedInstance.getShardFailures();
+        assertEquals(expectedFailures.length, newFailures.length);
+    }
+
+    /**
+     * Test parsing {@link GraphExploreResponse} with inner failures as they don't support asserting on xcontent equivalence, given
+     * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()}
+     * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end.
+     */
+    public void testFromXContentWithFailures() throws IOException {
+        Supplier<GraphExploreResponse> instanceSupplier = GraphExploreResponseTests::createTestInstanceWithFailures;
+        // with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata,
+        // but that does not bother our assertions, as we only want to test that we don't break.
+        boolean supportsUnknownFields = true;
+        // exceptions are not of the same type whenever parsed back
+        boolean assertToXContentEquivalence = false;
+        AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, Strings.EMPTY_ARRAY,
+                getRandomFieldsExcludeFilterWhenResultHasErrors(), this::createParser, this::doParseInstance,
+                this::assertEqualInstances, assertToXContentEquivalence, ToXContent.EMPTY_PARAMS);
+    }
+
+}
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/CloseJobRequestTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/CloseJobRequestTests.java
new file mode 100644
index 0000000000000..435504b52983d
--- /dev/null
+++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/CloseJobRequestTests.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.protocol.xpack.ml;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class CloseJobRequestTests extends AbstractXContentTestCase<CloseJobRequest> {
+
+    public void testCloseAllJobsRequest() {
+        CloseJobRequest request = CloseJobRequest.closeAllJobsRequest();
+        assertEquals(1, request.getJobIds().size());
+        assertEquals("_all", request.getJobIds().get(0));
+    }
+
+    public void testWithNullJobIds() {
+        Exception exception = expectThrows(IllegalArgumentException.class, CloseJobRequest::new);
+        assertEquals("jobIds must not be empty", exception.getMessage());
+
+        exception = expectThrows(NullPointerException.class, () -> new CloseJobRequest("job1", null));
+        assertEquals("jobIds must not contain null values", exception.getMessage());
+    }
+
+    @Override
+    protected CloseJobRequest createTestInstance() {
+        int jobCount = randomIntBetween(1, 10);
+        List<String> jobIds = new ArrayList<>(jobCount);
+
+        for (int i = 0; i < jobCount; i++) {
+            jobIds.add(randomAlphaOfLength(10));
+        }
+
+        CloseJobRequest request = new CloseJobRequest(jobIds.toArray(new String[0]));
+
+        if (randomBoolean()) {
+            request.setAllowNoJobs(randomBoolean());
+        }
+
+        if (randomBoolean()) {
+            request.setTimeout(TimeValue.timeValueMinutes(randomIntBetween(1, 10)));
+        }
+
+        if (randomBoolean()) {
+            request.setForce(randomBoolean());
+        }
+
+        return request;
+    }
+
+    @Override
+    protected CloseJobRequest doParseInstance(XContentParser parser) throws IOException {
+        return CloseJobRequest.PARSER.parse(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/CloseJobResponseTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/CloseJobResponseTests.java
new file mode 100644
index 0000000000000..d161fde536eca
--- /dev/null
+++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/CloseJobResponseTests.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.protocol.xpack.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class CloseJobResponseTests extends AbstractXContentTestCase<CloseJobResponse> {
+
+    @Override
+    protected CloseJobResponse createTestInstance() {
+        return new CloseJobResponse(randomBoolean());
+    }
+
+    @Override
+    protected CloseJobResponse doParseInstance(XContentParser parser) throws IOException {
+        return CloseJobResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/GetBucketsRequestTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/GetBucketsRequestTests.java
new file mode 100644
index 0000000000000..6364ad339b120
--- /dev/null
+++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/GetBucketsRequestTests.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.protocol.xpack.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.protocol.xpack.ml.job.util.PageParams;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class GetBucketsRequestTests extends AbstractXContentTestCase<GetBucketsRequest> {
+
+    @Override
+    protected GetBucketsRequest createTestInstance() {
+        GetBucketsRequest request = new GetBucketsRequest(randomAlphaOfLengthBetween(1, 20));
+
+        if (randomBoolean()) {
+            request.setTimestamp(String.valueOf(randomLong()));
+        } else {
+            if (randomBoolean()) {
+                request.setStart(String.valueOf(randomLong()));
+            }
+            if (randomBoolean()) {
+                request.setEnd(String.valueOf(randomLong()));
+            }
+            if (randomBoolean()) {
+                request.setExcludeInterim(randomBoolean());
+            }
+            if (randomBoolean()) {
+                request.setAnomalyScore(randomDouble());
+            }
+            if (randomBoolean()) {
+                int from = randomInt(10000);
+                int size = randomInt(10000);
+                request.setPageParams(new PageParams(from, size));
+            }
+            if (randomBoolean()) {
+                request.setSort("anomaly_score");
+            }
+            if (randomBoolean()) {
+                request.setDescending(randomBoolean());
+            }
+        }
+        if (randomBoolean()) {
+            request.setExpand(randomBoolean());
+        }
+        if (randomBoolean()) {
+            request.setExcludeInterim(randomBoolean());
+        }
+        return request;
+    }
+
+    @Override
+    protected GetBucketsRequest doParseInstance(XContentParser parser) throws IOException {
+        return GetBucketsRequest.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/GetBucketsResponseTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/GetBucketsResponseTests.java
new file mode 100644
index 0000000000000..889c3e93bc708
--- /dev/null
+++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/GetBucketsResponseTests.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.protocol.xpack.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.protocol.xpack.ml.job.results.Bucket;
+import org.elasticsearch.protocol.xpack.ml.job.results.BucketTests;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GetBucketsResponseTests extends AbstractXContentTestCase<GetBucketsResponse> {
+
+    @Override
+    protected GetBucketsResponse createTestInstance() {
+        String jobId = randomAlphaOfLength(20);
+        int listSize = randomInt(10);
+        List<Bucket> buckets = new ArrayList<>(listSize);
+        for (int j = 0; j < listSize; j++) {
+            Bucket bucket = BucketTests.createTestInstance(jobId);
+            buckets.add(bucket);
+        }
+        return new GetBucketsResponse(buckets, listSize);
+    }
+
+    @Override
+    protected GetBucketsResponse doParseInstance(XContentParser parser) throws IOException {
+        return GetBucketsResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/GetJobRequestTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/GetJobRequestTests.java
new file mode 100644
index 0000000000000..b94b704fbf6e8
--- /dev/null
+++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/GetJobRequestTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.protocol.xpack.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GetJobRequestTests extends AbstractXContentTestCase<GetJobRequest> {
+
+    public void testAllJobsRequest() {
+        GetJobRequest request = GetJobRequest.getAllJobsRequest();
+
+        assertEquals(1, request.getJobIds().size());
+        assertEquals("_all", request.getJobIds().get(0));
+    }
+
+    public void testNewWithJobId() {
+        Exception exception = expectThrows(NullPointerException.class, () -> new GetJobRequest("job", null));
+        assertEquals("jobIds must not contain null values", exception.getMessage());
+    }
+
+    @Override
+    protected GetJobRequest createTestInstance() {
+        int jobCount = randomIntBetween(0, 10);
+        List<String> jobIds = new ArrayList<>(jobCount);
+
+        for (int i = 0; i < jobCount; i++) {
+            jobIds.add(randomAlphaOfLength(10));
+        }
+
+        GetJobRequest request = new GetJobRequest(jobIds);
+
+        if (randomBoolean()) {
+            request.setAllowNoJobs(randomBoolean());
+        }
+
+        return request;
+    }
+
+    @Override
+    protected GetJobRequest doParseInstance(XContentParser parser) throws IOException {
+        return GetJobRequest.PARSER.parse(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/GetJobResponseTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/GetJobResponseTests.java
new file mode 100644
index 0000000000000..79d4d678b9295
--- /dev/null
+++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/GetJobResponseTests.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.protocol.xpack.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.protocol.xpack.ml.job.config.Job;
+import org.elasticsearch.protocol.xpack.ml.job.config.JobTests;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GetJobResponseTests extends AbstractXContentTestCase<GetJobResponse> {
+
+    @Override
+    protected GetJobResponse createTestInstance() {
+        int count = randomIntBetween(1, 5);
+        List<Job.Builder> results = new ArrayList<>(count);
+        for (int i = 0; i < count; i++) {
+            results.add(JobTests.createRandomizedJobBuilder());
+        }
+
+        return new GetJobResponse(results, count);
+    }
+
+    @Override
+    protected GetJobResponse doParseInstance(XContentParser parser) throws IOException {
+        return GetJobResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/JobTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/JobTests.java
index 7ba4946efa753..61931743403e0 100644
--- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/JobTests.java
+++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/JobTests.java
@@ -210,7 +210,7 @@ public static AnalysisConfig.Builder createAnalysisConfig() {
         return new AnalysisConfig.Builder(Arrays.asList(d1.build(), d2.build()));
     }
 
-    public static Job createRandomizedJob() {
+    public static Job.Builder createRandomizedJobBuilder() {
         String jobId = randomValidJobId();
         Job.Builder builder = new Job.Builder(jobId);
         if (randomBoolean()) {
@@ -265,7 +265,11 @@ public static Job createRandomizedJob() {
         if (randomBoolean()) {
             builder.setResultsIndexName(randomValidJobId());
         }
-        return builder.build();
+        return builder;
+    }
+
+    public static Job createRandomizedJob() {
+        return createRandomizedJobBuilder().build();
     }
 
     @Override
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketTests.java
index 28b1893afe18b..0eb988d8eb82b 100644
--- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketTests.java
+++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/results/BucketTests.java
@@ -35,7 +35,7 @@ public Bucket createTestInstance() {
         return createTestInstance("foo");
     }
 
-    public Bucket createTestInstance(String jobId) {
+    public static Bucket createTestInstance(String jobId) {
         Bucket bucket = new Bucket(jobId, new Date(randomNonNegativeLong()), randomNonNegativeLong());
         if (randomBoolean()) {
             bucket.setAnomalyScore(randomDouble());
diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/util/PageParamsTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/util/PageParamsTests.java
new file mode 100644
index 0000000000000..6bd51e93c6f37
--- /dev/null
+++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/util/PageParamsTests.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.protocol.xpack.ml.util;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.protocol.xpack.ml.job.util.PageParams;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+public class PageParamsTests extends AbstractXContentTestCase<PageParams> {
+
+    @Override
+    protected PageParams doParseInstance(XContentParser parser) {
+        return PageParams.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+
+    @Override
+    protected PageParams createTestInstance() {
+        Integer from = randomBoolean() ? randomInt() : null;
+        Integer size = randomBoolean() ? randomInt() : null;
+        return new PageParams(from, size);
+    }
+}
diff --git a/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java b/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java
index c0111e57c7448..d1ee4f2d9e104 100644
--- a/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java
+++ b/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java
@@ -6,7 +6,6 @@
 package org.elasticsearch.xpack.security.audit;
 
 import com.carrotsearch.hppc.cursors.ObjectCursor;
-import org.apache.http.message.BasicHeader;
 import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
@@ -111,10 +110,12 @@ public NamedWriteableRegistry getNamedWriteableRegistry() {
     }
 
     public void testIndexAuditTrailWorking() throws Exception {
-        Response response = getRestClient().performRequest("GET", "/",
-                new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER,
-                        UsernamePasswordToken.basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray()))));
-        assertThat(response.getStatusLine().getStatusCode(), is(200));
+        Request request = new Request("GET", "/");
+        RequestOptions.Builder options = request.getOptions().toBuilder();
+        options.addHeader(UsernamePasswordToken.BASIC_AUTH_HEADER,
+                UsernamePasswordToken.basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())));
+        request.setOptions(options);
+        Response response = getRestClient().performRequest(request);
 
         final AtomicReference<ClusterState> lastClusterState = new AtomicReference<>();
         final boolean found = awaitSecurityAuditIndex(lastClusterState, QueryBuilders.matchQuery("principal", USER));
diff --git a/x-pack/qa/evil-tests/build.gradle b/x-pack/qa/evil-tests/build.gradle
new file mode 100644
index 0000000000000..03f2a56987310
--- /dev/null
+++ b/x-pack/qa/evil-tests/build.gradle
@@ -0,0 +1,9 @@
+apply plugin: 'elasticsearch.standalone-test'
+
+dependencies {
+  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
+}
+
+test {
+  systemProperty 'tests.security.manager', 'false'
+}
diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/core/scheduler/EvilSchedulerEngineTests.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/core/scheduler/EvilSchedulerEngineTests.java
new file mode 100644
index 0000000000000..2dfd314ffb06e
--- /dev/null
+++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/core/scheduler/EvilSchedulerEngineTests.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.scheduler;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESTestCase;
+
+import java.time.Clock;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasToString;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.not;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+
+public class EvilSchedulerEngineTests extends ESTestCase {
+
+    public void testOutOfMemoryErrorWhileTriggeredIsRethrownAndIsUncaught() throws InterruptedException {
+        final AtomicReference<Throwable> maybeFatal = new AtomicReference<>();
+        final CountDownLatch uncaughtLatch = new CountDownLatch(1);
+        final Thread.UncaughtExceptionHandler uncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler();
+        try {
+            /*
+             * We want to test that the out of memory error thrown from the scheduler engine goes uncaught on another thread; this
+             * gives us confidence that an error thrown during a triggered event will lead to the node being torn down.
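+             * To observe this, the test temporarily installs a default uncaught exception handler that records the error and the
+             * thread it surfaced on, and restores the original handler once the test completes.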
+             */
+            final AtomicReference<Thread> maybeThread = new AtomicReference<>();
+            Thread.setDefaultUncaughtExceptionHandler((t, e) -> {
+                maybeFatal.set(e);
+                maybeThread.set(Thread.currentThread());
+                uncaughtLatch.countDown();
+            });
+            final Logger mockLogger = mock(Logger.class);
+            final SchedulerEngine engine = new SchedulerEngine(Settings.EMPTY, Clock.systemUTC(), mockLogger);
+            try {
+                final AtomicBoolean trigger = new AtomicBoolean();
+                engine.register(event -> {
+                    if (trigger.compareAndSet(false, true)) {
+                        throw new OutOfMemoryError("640K ought to be enough for anybody");
+                    } else {
+                        fail("listener invoked twice");
+                    }
+                });
+                final CountDownLatch schedulerLatch = new CountDownLatch(1);
+                engine.add(new SchedulerEngine.Job(
+                    getTestName(),
+                    (startTime, now) -> {
+                        if (schedulerLatch.getCount() == 1) {
+                            schedulerLatch.countDown();
+                            return 0;
+                        } else {
+                            throw new AssertionError("nextScheduledTimeAfter invoked more than the expected number of times");
+                        }
+                    }));
+
+                uncaughtLatch.await();
+                assertTrue(trigger.get());
+                assertNotNull(maybeFatal.get());
+                assertThat(maybeFatal.get(), instanceOf(OutOfMemoryError.class));
+                assertThat(maybeFatal.get(), hasToString(containsString("640K ought to be enough for anybody")));
+                assertNotNull(maybeThread.get());
+                assertThat(maybeThread.get(), not(equalTo(Thread.currentThread()))); // the error should be rethrown on another thread
+                schedulerLatch.await();
+                verifyNoMoreInteractions(mockLogger); // we never logged anything
+            } finally {
+                engine.stop();
+            }
+        } finally {
+            // restore the uncaught exception handler
+            Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler);
+        }
+    }
+
+}
diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle
index 3cf2970120675..ab8f9172b690c 100644
--- a/x-pack/qa/full-cluster-restart/build.gradle
+++ b/x-pack/qa/full-cluster-restart/build.gradle
@@ -11,7 +11,8 @@ apply plugin: 'elasticsearch.build'
 test.enabled = false
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'shadow')
+  // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
+  testCompile project(path: xpackModule('core'), configuration: 'default')
   testCompile (project(path: xpackModule('security'), configuration: 'runtime')) {
     // Need to drop the guava dependency here or we get a conflict with watcher's guava dependency.
// This is total #$%, but the solution is to get the SAML realm (which uses guava) out of security proper @@ -249,7 +250,8 @@ subprojects { check.dependsOn(integTest) dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('watcher'), configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') diff --git a/x-pack/qa/kerberos-tests/build.gradle b/x-pack/qa/kerberos-tests/build.gradle index 59667d9ee7809..f680a45bd7f57 100644 --- a/x-pack/qa/kerberos-tests/build.gradle +++ b/x-pack/qa/kerberos-tests/build.gradle @@ -7,7 +7,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') } diff --git a/x-pack/qa/ml-basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java b/x-pack/qa/ml-basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java deleted file mode 100644 index e7381050260c4..0000000000000 --- a/x-pack/qa/ml-basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.ml.integration; - -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.ml.MachineLearning; - -import java.io.IOException; -import java.net.URLEncoder; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.common.xcontent.XContentType.JSON; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -public class MlBasicMultiNodeIT extends ESRestTestCase { - - @SuppressWarnings("unchecked") - public void testMachineLearningInstalled() throws Exception { - Response response = client().performRequest("get", "/_xpack"); - assertEquals(200, response.getStatusLine().getStatusCode()); - Map features = (Map) responseEntityToMap(response).get("features"); - Map ml = (Map) features.get("ml"); - assertNotNull(ml); - assertTrue((Boolean) ml.get("available")); - assertTrue((Boolean) ml.get("enabled")); - } - - public void testInvalidJob() throws Exception { - // The job name is invalid because it contains a space - String jobId = "invalid job"; - ResponseException e = expectThrows(ResponseException.class, () -> createFarequoteJob(jobId)); - assertTrue(e.getMessage(), e.getMessage().contains("can contain lowercase alphanumeric (a-z and 0-9), hyphens or underscores")); - // If validation of the invalid job is not done until after transportation to the master node then the - // root cause gets reported as a remote_transport_exception. The code in PubJobAction is supposed to - // validate before transportation to avoid this. This test must be done in a multi-node cluster to have - // a chance of catching a problem, hence it is here rather than in the single node integration tests. 
- assertFalse(e.getMessage(), e.getMessage().contains("remote_transport_exception")); - } - - public void testMiniFarequote() throws Exception { - String jobId = "mini-farequote-job"; - createFarequoteJob(jobId); - - Response response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); - - String postData = - "{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + - "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}"; - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data", - Collections.emptyMap(), - new StringEntity(postData, randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); - assertEquals(202, response.getStatusLine().getStatusCode()); - Map responseBody = responseEntityToMap(response); - assertEquals(2, responseBody.get("processed_record_count")); - assertEquals(4, responseBody.get("processed_field_count")); - assertEquals(177, responseBody.get("input_bytes")); - assertEquals(6, responseBody.get("input_field_count")); - assertEquals(0, responseBody.get("invalid_date_count")); - assertEquals(0, responseBody.get("missing_field_count")); - assertEquals(0, responseBody.get("out_of_order_timestamp_count")); - assertEquals(0, responseBody.get("bucket_count")); - assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); - assertEquals(1403481700000L, responseBody.get("latest_record_timestamp")); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertFlushResponse(response, true, 1403481600000L); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); - - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertEquals(200, response.getStatusLine().getStatusCode()); - @SuppressWarnings("unchecked") - Map dataCountsDoc = (Map) - ((Map)((List) responseEntityToMap(response).get("jobs")).get(0)).get("data_counts"); - assertEquals(2, dataCountsDoc.get("processed_record_count")); - assertEquals(4, dataCountsDoc.get("processed_field_count")); - assertEquals(177, dataCountsDoc.get("input_bytes")); - assertEquals(6, dataCountsDoc.get("input_field_count")); - assertEquals(0, dataCountsDoc.get("invalid_date_count")); - assertEquals(0, dataCountsDoc.get("missing_field_count")); - assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count")); - assertEquals(0, dataCountsDoc.get("bucket_count")); - assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp")); - assertEquals(1403481700000L, dataCountsDoc.get("latest_record_timestamp")); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertEquals(200, response.getStatusLine().getStatusCode()); - } - - public void testMiniFarequoteWithDatafeeder() throws Exception { - String mappings = "{" - + " \"mappings\": 
{" - + " \"response\": {" - + " \"properties\": {" - + " \"time\": { \"type\":\"date\"}," - + " \"airline\": { \"type\":\"keyword\"}," - + " \"responsetime\": { \"type\":\"float\"}" - + " }" - + " }" - + " }" - + "}"; - client().performRequest("put", "airline-data", Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data/response/1", Collections.emptyMap(), - new StringEntity("{\"time\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}", - ContentType.APPLICATION_JSON)); - client().performRequest("put", "airline-data/response/2", Collections.emptyMap(), - new StringEntity("{\"time\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}", - ContentType.APPLICATION_JSON)); - - // Ensure all data is searchable - client().performRequest("post", "_refresh"); - - String jobId = "mini-farequote-with-data-feeder-job"; - createFarequoteJob(jobId); - String datafeedId = "bar"; - createDatafeed(datafeedId, jobId); - - Response response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start", - Collections.singletonMap("start", "0")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("started", true), responseEntityToMap(response)); - - assertBusy(() -> { - try { - Response statsResponse = - client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertEquals(200, statsResponse.getStatusLine().getStatusCode()); - @SuppressWarnings("unchecked") - Map dataCountsDoc = (Map) - ((Map)((List) responseEntityToMap(statsResponse).get("jobs")).get(0)).get("data_counts"); - assertEquals(2, dataCountsDoc.get("input_record_count")); - assertEquals(2, dataCountsDoc.get("processed_record_count")); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stop"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("stopped", true), responseEntityToMap(response)); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); - assertEquals(200, response.getStatusLine().getStatusCode()); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertEquals(200, response.getStatusLine().getStatusCode()); - } - - public void testMiniFarequoteReopen() throws Exception { - String jobId = "mini-farequote-reopen"; - createFarequoteJob(jobId); - - Response response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); - - String postData = - 
"{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + - "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}\n" + - "{\"airline\":\"JBU\",\"responsetime\":\"877.5927\",\"sourcetype\":\"farequote\",\"time\":\"1403481800\"}\n" + - "{\"airline\":\"KLM\",\"responsetime\":\"1355.4812\",\"sourcetype\":\"farequote\",\"time\":\"1403481900\"}\n" + - "{\"airline\":\"NKS\",\"responsetime\":\"9991.3981\",\"sourcetype\":\"farequote\",\"time\":\"1403482000\"}"; - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data", - Collections.emptyMap(), - new StringEntity(postData, randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); - assertEquals(202, response.getStatusLine().getStatusCode()); - Map responseBody = responseEntityToMap(response); - assertEquals(5, responseBody.get("processed_record_count")); - assertEquals(10, responseBody.get("processed_field_count")); - assertEquals(446, responseBody.get("input_bytes")); - assertEquals(15, responseBody.get("input_field_count")); - assertEquals(0, responseBody.get("invalid_date_count")); - assertEquals(0, responseBody.get("missing_field_count")); - assertEquals(0, responseBody.get("out_of_order_timestamp_count")); - assertEquals(0, responseBody.get("bucket_count")); - assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); - assertEquals(1403482000000L, responseBody.get("latest_record_timestamp")); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush"); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertFlushResponse(response, true, 1403481600000L); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); - - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertEquals(200, response.getStatusLine().getStatusCode()); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); - - // feed some more data points - postData = - "{\"airline\":\"AAL\",\"responsetime\":\"136.2361\",\"sourcetype\":\"farequote\",\"time\":\"1407081600\"}\n" + - "{\"airline\":\"VRD\",\"responsetime\":\"282.9847\",\"sourcetype\":\"farequote\",\"time\":\"1407081700\"}\n" + - "{\"airline\":\"JAL\",\"responsetime\":\"493.0338\",\"sourcetype\":\"farequote\",\"time\":\"1407081800\"}\n" + - "{\"airline\":\"UAL\",\"responsetime\":\"8.4275\",\"sourcetype\":\"farequote\",\"time\":\"1407081900\"}\n" + - "{\"airline\":\"FFT\",\"responsetime\":\"221.8693\",\"sourcetype\":\"farequote\",\"time\":\"1407082000\"}"; - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data", - Collections.emptyMap(), - new StringEntity(postData, randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); - assertEquals(202, response.getStatusLine().getStatusCode()); - 
responseBody = responseEntityToMap(response); - assertEquals(5, responseBody.get("processed_record_count")); - assertEquals(10, responseBody.get("processed_field_count")); - assertEquals(442, responseBody.get("input_bytes")); - assertEquals(15, responseBody.get("input_field_count")); - assertEquals(0, responseBody.get("invalid_date_count")); - assertEquals(0, responseBody.get("missing_field_count")); - assertEquals(0, responseBody.get("out_of_order_timestamp_count")); - assertEquals(1000, responseBody.get("bucket_count")); - - // unintuitive: earliest_record_timestamp is null here, though arguably it should be the earliest record timestamp of this feed - assertEquals(null, responseBody.get("earliest_record_timestamp")); - assertEquals(1407082000000L, responseBody.get("latest_record_timestamp")); - - response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", - Collections.singletonMap("timeout", "20s")); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); - - // counts should be summed up - response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); - assertEquals(200, response.getStatusLine().getStatusCode()); - - @SuppressWarnings("unchecked") - Map dataCountsDoc = (Map) - ((Map)((List) responseEntityToMap(response).get("jobs")).get(0)).get("data_counts"); - assertEquals(10, dataCountsDoc.get("processed_record_count")); - assertEquals(20, dataCountsDoc.get("processed_field_count")); - assertEquals(888, dataCountsDoc.get("input_bytes")); - assertEquals(30, dataCountsDoc.get("input_field_count")); - assertEquals(0, dataCountsDoc.get("invalid_date_count")); - assertEquals(0, dataCountsDoc.get("missing_field_count")); - assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count")); - assertEquals(1000, dataCountsDoc.get("bucket_count")); - assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp")); - assertEquals(1407082000000L, dataCountsDoc.get("latest_record_timestamp")); - - response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); - assertEquals(200, response.getStatusLine().getStatusCode()); - } - - private Response createDatafeed(String datafeedId, String jobId) throws Exception { - XContentBuilder xContentBuilder = jsonBuilder(); - xContentBuilder.startObject(); - xContentBuilder.field("job_id", jobId); - xContentBuilder.array("indexes", "airline-data"); - xContentBuilder.array("types", "response"); - xContentBuilder.field("_source", true); - xContentBuilder.endObject(); - return client().performRequest("put", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId, - Collections.emptyMap(), new StringEntity(Strings.toString(xContentBuilder), ContentType.APPLICATION_JSON)); - } - - private Response createFarequoteJob(String jobId) throws Exception { - XContentBuilder xContentBuilder = jsonBuilder(); - xContentBuilder.startObject(); - xContentBuilder.field("job_id", jobId); - xContentBuilder.field("description", "Analysis of response time by airline"); - - xContentBuilder.startObject("analysis_config"); - xContentBuilder.field("bucket_span", "3600s"); - xContentBuilder.startArray("detectors"); - xContentBuilder.startObject(); - xContentBuilder.field("function", "metric"); - xContentBuilder.field("field_name", "responsetime"); - xContentBuilder.field("by_field_name", "airline"); - xContentBuilder.endObject(); - xContentBuilder.endArray(); - xContentBuilder.endObject();
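At this point createFarequoteJob has emitted the detector section of the job configuration. A sketch of the JSON the analysis_config portion above serializes to (whitespace illustrative; the data_description section is built next):

    // "analysis_config": {
    //   "bucket_span": "3600s",
    //   "detectors": [
    //     { "function": "metric", "field_name": "responsetime", "by_field_name": "airline" }
    //   ]
    // }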
- - xContentBuilder.startObject("data_description"); - xContentBuilder.field("format", "xcontent"); - xContentBuilder.field("time_field", "time"); - xContentBuilder.field("time_format", "epoch"); - xContentBuilder.endObject(); - xContentBuilder.endObject(); - - return client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + URLEncoder.encode(jobId, "UTF-8"), - Collections.emptyMap(), new StringEntity(Strings.toString(xContentBuilder), ContentType.APPLICATION_JSON)); - } - - private static Map responseEntityToMap(Response response) throws IOException { - return XContentHelper.convertToMap(JSON.xContent(), response.getEntity().getContent(), false); - } - - private static void assertFlushResponse(Response response, boolean expectedFlushed, long expectedLastFinalizedBucketEnd) - throws IOException { - Map asMap = responseEntityToMap(response); - assertThat(asMap.size(), equalTo(2)); - assertThat(asMap.get("flushed"), is(expectedFlushed)); - assertThat(asMap.get("last_finalized_bucket_end"), equalTo(expectedLastFinalizedBucketEnd)); - } -} diff --git a/x-pack/qa/ml-disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java b/x-pack/qa/ml-disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java deleted file mode 100644 index 3bb9566e5bf17..0000000000000 --- a/x-pack/qa/ml-disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.integration; - -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.ml.MachineLearning; - -import java.util.Collections; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.containsString; - -public class MlPluginDisabledIT extends ESRestTestCase { - - /** - * Check that when the ml plugin is disabled, you cannot create a job as the - * rest handler is not registered - */ - public void testActionsFail() throws Exception { - XContentBuilder xContentBuilder = jsonBuilder(); - xContentBuilder.startObject(); - xContentBuilder.field("actions-fail-job", "foo"); - xContentBuilder.field("description", "Analysis of response time by airline"); - - xContentBuilder.startObject("analysis_config"); - xContentBuilder.field("bucket_span", "3600s"); - xContentBuilder.startArray("detectors"); - xContentBuilder.startObject(); - xContentBuilder.field("function", "metric"); - xContentBuilder.field("field_name", "responsetime"); - xContentBuilder.field("by_field_name", "airline"); - xContentBuilder.endObject(); - xContentBuilder.endArray(); - xContentBuilder.endObject(); - - xContentBuilder.startObject("data_description"); - xContentBuilder.field("format", "xcontent"); - xContentBuilder.field("time_field", "time"); - xContentBuilder.field("time_format", "epoch"); - xContentBuilder.endObject(); - xContentBuilder.endObject(); - - ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("put",
MachineLearning.BASE_PATH + "anomaly_detectors/foo", Collections.emptyMap(), - new StringEntity(Strings.toString(xContentBuilder), ContentType.APPLICATION_JSON))); - assertThat(exception.getMessage(), containsString("no handler found for uri [/_xpack/ml/anomaly_detectors/foo] and method [PUT]")); - } -} diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CloseJobsIT.java b/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CloseJobsIT.java new file mode 100644 index 0000000000000..95ec9728842c6 --- /dev/null +++ b/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CloseJobsIT.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.ml.MachineLearning; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.equalTo; + +public class CloseJobsIT extends ESRestTestCase { + + private static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("x_pack_rest_user", + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); + + @Override + protected Settings restClientSettings() { + return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build(); + } + + public void testCloseJobsAcceptsOptionsFromPayload() throws Exception { + + Request request = new Request("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + "job-that-doesnot-exist*" + "/_close"); + request.setJsonEntity("{\"allow_no_jobs\":false}"); + request.setOptions(RequestOptions.DEFAULT); + ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + + request.setJsonEntity("{\"allow_no_jobs\":true}"); + Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + String responseAsString = responseEntityToString(response); + assertEquals("{\"closed\":true}", responseAsString); + } + + private static String responseEntityToString(Response response) throws IOException { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) { + return reader.lines().collect(Collectors.joining("\n")); + } + } +} diff --git a/x-pack/qa/multi-cluster-search-security/build.gradle b/x-pack/qa/multi-cluster-search-security/build.gradle index 5d90f974762bc..c06ad68d80325 100644 ---
a/x-pack/qa/multi-cluster-search-security/build.gradle +++ b/x-pack/qa/multi-cluster-search-security/build.gradle @@ -3,7 +3,8 @@ import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.standalone-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/qa/multi-node/build.gradle b/x-pack/qa/multi-node/build.gradle index 19729cf367ef5..4369287caba32 100644 --- a/x-pack/qa/multi-node/build.gradle +++ b/x-pack/qa/multi-node/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } integTestCluster { diff --git a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java index abc784b4cb286..18cd67ff271ad 100644 --- a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java +++ b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java @@ -5,8 +5,7 @@ */ package org.elasticsearch.multi_node; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; @@ -16,10 +15,6 @@ import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.ObjectPath; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.equalTo; @@ -59,12 +54,15 @@ public void testGlobalCheckpointSyncActionRunsAsPrivilegedUser() throws Exceptio builder.endObject(); } builder.endObject(); - final StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - client().performRequest("PUT", "test-index", Collections.emptyMap(), entity); + Request createIndexRequest = new Request("PUT", "/test-index"); + createIndexRequest.setJsonEntity(Strings.toString(builder)); + client().performRequest(createIndexRequest); } // wait for the replica to recover - client().performRequest("GET", "/_cluster/health", Collections.singletonMap("wait_for_status", "green")); + Request healthRequest = new Request("GET", "/_cluster/health"); + healthRequest.addParameter("wait_for_status", "green"); + client().performRequest(healthRequest); // index some documents final int numberOfDocuments = randomIntBetween(0, 128); @@ -75,17 +73,18 @@ public void testGlobalCheckpointSyncActionRunsAsPrivilegedUser() throws Exceptio builder.field("foo", i); } builder.endObject(); - final StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - client().performRequest("PUT", "/test-index/test-type/" + i, Collections.emptyMap(), entity); + 
Request indexRequest = new Request("PUT", "/test-index/test-type/" + i); + indexRequest.setJsonEntity(Strings.toString(builder)); + client().performRequest(indexRequest); } } // we have to wait for the post-operation global checkpoint sync to propagate to the replica assertBusy(() -> { - final Map params = new HashMap<>(2); - params.put("level", "shards"); - params.put("filter_path", "**.seq_no"); - final Response response = client().performRequest("GET", "/test-index/_stats", params); + final Request request = new Request("GET", "/test-index/_stats"); + request.addParameter("level", "shards"); + request.addParameter("filter_path", "**.seq_no"); + final Response response = client().performRequest(request); final ObjectPath path = ObjectPath.createFromResponse(response); // int looks funny here since global checkpoints are longs but the response parser does not know enough to treat them as long final int shard0GlobalCheckpoint = path.evaluate("indices.test-index.shards.0.0.seq_no.global_checkpoint"); diff --git a/x-pack/qa/openldap-tests/build.gradle b/x-pack/qa/openldap-tests/build.gradle index 24cd6184afa63..bb9a979928978 100644 --- a/x-pack/qa/openldap-tests/build.gradle +++ b/x-pack/qa/openldap-tests/build.gradle @@ -5,7 +5,8 @@ apply plugin: 'elasticsearch.standalone-test' apply plugin: 'elasticsearch.vagrantsupport' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle index 097d343b27984..97c0e8e17fee7 100644 --- a/x-pack/qa/reindex-tests-with-security/build.gradle +++ b/x-pack/qa/reindex-tests-with-security/build.gradle @@ -2,7 +2,8 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: ':modules:reindex') diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle index 21ac4414d86b2..5774e5d78561d 100644 --- a/x-pack/qa/rolling-upgrade-basic/build.gradle +++ b/x-pack/qa/rolling-upgrade-basic/build.gradle @@ -7,7 +7,8 @@ import java.nio.charset.StandardCharsets apply plugin: 'elasticsearch.standalone-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit } diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index b983caa866937..548081a893881 100644 --- 
a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -10,7 +10,8 @@ apply plugin: 'elasticsearch.build' test.enabled = false dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit } @@ -284,7 +285,8 @@ subprojects { check.dependsOn(integTest) dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('watcher')) } diff --git a/x-pack/qa/saml-idp-tests/build.gradle b/x-pack/qa/saml-idp-tests/build.gradle index 752ec6fb3071b..9dd5d6d848f9a 100644 --- a/x-pack/qa/saml-idp-tests/build.gradle +++ b/x-pack/qa/saml-idp-tests/build.gradle @@ -6,7 +6,8 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile 'com.google.jimfs:jimfs:1.1' diff --git a/x-pack/qa/security-client-tests/build.gradle b/x-pack/qa/security-client-tests/build.gradle index 97945fb00efcd..e676e55a152d4 100644 --- a/x-pack/qa/security-client-tests/build.gradle +++ b/x-pack/qa/security-client-tests/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') } diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle index 7aeed3ad62de6..aef4fc33f6abe 100644 --- a/x-pack/qa/security-example-spi-extension/build.gradle +++ b/x-pack/qa/security-example-spi-extension/build.gradle @@ -8,7 +8,7 @@ esplugin { } dependencies { - compileOnly project(path: xpackModule('core'), configuration: 'shadow') + compileOnly "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') } diff --git a/x-pack/qa/security-migrate-tests/build.gradle b/x-pack/qa/security-migrate-tests/build.gradle index 3a8a0cf100554..abc3564ca13f2 100644 --- a/x-pack/qa/security-migrate-tests/build.gradle +++ b/x-pack/qa/security-migrate-tests/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile 
"org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('security'), configuration: 'runtime') testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') } diff --git a/x-pack/qa/security-setup-password-tests/build.gradle b/x-pack/qa/security-setup-password-tests/build.gradle index adb159acf6f6b..c0801a38b570c 100644 --- a/x-pack/qa/security-setup-password-tests/build.gradle +++ b/x-pack/qa/security-setup-password-tests/build.gradle @@ -2,7 +2,8 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/qa/smoke-test-graph-with-security/build.gradle b/x-pack/qa/smoke-test-graph-with-security/build.gradle index 9cdfaffccfbce..f0f819b46d478 100644 --- a/x-pack/qa/smoke-test-graph-with-security/build.gradle +++ b/x-pack/qa/smoke-test-graph-with-security/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } // bring in graph rest test suite diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle index 8ce0cde76575a..7813ff3d3d56c 100644 --- a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle +++ b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('watcher')) testCompile project(path: xpackModule('monitoring')) } diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java index d89d558f02fae..d3b9a974398da 100644 --- a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java +++ b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java @@ -5,12 +5,10 @@ */ package org.elasticsearch.smoketest; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.lucene.util.LuceneTestCase.AwaitsFix; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.rest.ESRestTestCase; @@ -23,7 +21,6 @@ import org.junit.After; import java.io.IOException; -import java.util.Collections; import static 
org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; @@ -36,25 +33,25 @@ public class MonitoringWithWatcherRestIT extends ESRestTestCase { @After public void cleanExporters() throws Exception { - String body = Strings.toString(jsonBuilder().startObject().startObject("transient") - .nullField("xpack.monitoring.exporters.*") - .endObject().endObject()); - assertOK(adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(), - new StringEntity(body, ContentType.APPLICATION_JSON))); - - assertOK(adminClient().performRequest("DELETE", ".watch*", Collections.emptyMap())); + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity(Strings.toString(jsonBuilder().startObject() + .startObject("transient") + .nullField("xpack.monitoring.exporters.*") + .endObject().endObject())); + adminClient().performRequest(request); + adminClient().performRequest(new Request("DELETE", "/.watch*")); } public void testThatLocalExporterAddsWatches() throws Exception { String watchId = createMonitoringWatch(); - String body = BytesReference.bytes(jsonBuilder().startObject().startObject("transient") - .field("xpack.monitoring.exporters.my_local_exporter.type", "local") - .field("xpack.monitoring.exporters.my_local_exporter.cluster_alerts.management.enabled", true) - .endObject().endObject()).utf8ToString(); - - adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(), - new StringEntity(body, ContentType.APPLICATION_JSON)); + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity(Strings.toString(jsonBuilder().startObject() + .startObject("transient") + .field("xpack.monitoring.exporters.my_local_exporter.type", "local") + .field("xpack.monitoring.exporters.my_local_exporter.cluster_alerts.management.enabled", true) + .endObject().endObject())); + adminClient().performRequest(request); assertTotalWatchCount(ClusterAlertsUtil.WATCH_IDS.length); @@ -65,14 +62,14 @@ public void testThatHttpExporterAddsWatches() throws Exception { String watchId = createMonitoringWatch(); String httpHost = getHttpHost(); - String body = BytesReference.bytes(jsonBuilder().startObject().startObject("transient") - .field("xpack.monitoring.exporters.my_http_exporter.type", "http") - .field("xpack.monitoring.exporters.my_http_exporter.host", httpHost) - .field("xpack.monitoring.exporters.my_http_exporter.cluster_alerts.management.enabled", true) - .endObject().endObject()).utf8ToString(); - - adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(), - new StringEntity(body, ContentType.APPLICATION_JSON)); + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity(Strings.toString(jsonBuilder().startObject() + .startObject("transient") + .field("xpack.monitoring.exporters.my_http_exporter.type", "http") + .field("xpack.monitoring.exporters.my_http_exporter.host", httpHost) + .field("xpack.monitoring.exporters.my_http_exporter.cluster_alerts.management.enabled", true) + .endObject().endObject())); + adminClient().performRequest(request); assertTotalWatchCount(ClusterAlertsUtil.WATCH_IDS.length); @@ -80,15 +77,15 @@ public void testThatHttpExporterAddsWatches() throws Exception { } private void assertMonitoringWatchHasBeenOverWritten(String watchId) throws Exception { - ObjectPath path = ObjectPath.createFromResponse(client().performRequest("GET", "_xpack/watcher/watch/" + watchId)); + ObjectPath path = 
ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/_xpack/watcher/watch/" + watchId))); String interval = path.evaluate("watch.trigger.schedule.interval"); assertThat(interval, is("1m")); } private void assertTotalWatchCount(int expectedWatches) throws Exception { assertBusy(() -> { - assertOK(client().performRequest("POST", ".watches/_refresh")); - ObjectPath path = ObjectPath.createFromResponse(client().performRequest("POST", ".watches/_count")); + assertOK(client().performRequest(new Request("POST", "/.watches/_refresh"))); + ObjectPath path = ObjectPath.createFromResponse(client().performRequest(new Request("POST", "/.watches/_count"))); int count = path.evaluate("count"); assertThat(count, is(expectedWatches)); }); @@ -97,28 +94,28 @@ private void assertTotalWatchCount(int expectedWatches) throws Exception { private String createMonitoringWatch() throws Exception { String clusterUUID = getClusterUUID(); String watchId = clusterUUID + "_kibana_version_mismatch"; - String sampleWatch = WatchSourceBuilders.watchBuilder() + Request request = new Request("PUT", "/_xpack/watcher/watch/" + watchId); + request.setJsonEntity(WatchSourceBuilders.watchBuilder() .trigger(TriggerBuilders.schedule(new IntervalSchedule(new IntervalSchedule.Interval(1000, MINUTES)))) .input(simpleInput()) .addAction("logme", ActionBuilders.loggingAction("foo")) - .buildAsBytes(XContentType.JSON).utf8ToString(); - client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, Collections.emptyMap(), - new StringEntity(sampleWatch, ContentType.APPLICATION_JSON)); + .buildAsBytes(XContentType.JSON).utf8ToString()); + client().performRequest(request); return watchId; } private String getClusterUUID() throws Exception { - Response response = client().performRequest("GET", "_cluster/state/metadata", Collections.emptyMap()); + Response response = client().performRequest(new Request("GET", "/_cluster/state/metadata")); ObjectPath objectPath = ObjectPath.createFromResponse(response); String clusterUUID = objectPath.evaluate("metadata.cluster_uuid"); return clusterUUID; } public String getHttpHost() throws IOException { - ObjectPath path = ObjectPath.createFromResponse(client().performRequest("GET", "_cluster/state", Collections.emptyMap())); + ObjectPath path = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/_cluster/state"))); String masterNodeId = path.evaluate("master_node"); - ObjectPath nodesPath = ObjectPath.createFromResponse(client().performRequest("GET", "_nodes", Collections.emptyMap())); + ObjectPath nodesPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/_nodes"))); String httpHost = nodesPath.evaluate("nodes." 
+ masterNodeId + ".http.publish_address"); return httpHost; } diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle index 53533bd9b87f3..4f338d07fb531 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -15,7 +15,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } String outputDir = "${buildDir}/generated-resources/${project.name}" @@ -138,4 +138,4 @@ processTestResources { inputs.properties(expansions) MavenFilteringHack.filter(it, expansions) } -} \ No newline at end of file +} diff --git a/x-pack/qa/smoke-test-plugins/build.gradle b/x-pack/qa/smoke-test-plugins/build.gradle index b66903af18bfb..3b7661eeeb05a 100644 --- a/x-pack/qa/smoke-test-plugins/build.gradle +++ b/x-pack/qa/smoke-test-plugins/build.gradle @@ -4,7 +4,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } ext.pluginsCount = 0 diff --git a/x-pack/qa/smoke-test-security-with-mustache/build.gradle b/x-pack/qa/smoke-test-security-with-mustache/build.gradle index d921c5f5b6605..48b525ba3dae9 100644 --- a/x-pack/qa/smoke-test-security-with-mustache/build.gradle +++ b/x-pack/qa/smoke-test-security-with-mustache/build.gradle @@ -2,7 +2,8 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: ':modules:lang-mustache', configuration: 'runtime') } diff --git a/x-pack/qa/smoke-test-watcher-with-security/build.gradle b/x-pack/qa/smoke-test-watcher-with-security/build.gradle index a843641be801f..50e217b28b270 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/build.gradle +++ b/x-pack/qa/smoke-test-watcher-with-security/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } // bring in watcher rest test suite diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java index a989bb476118f..0c4afff509e94 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java @@ -7,9 +7,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import 
org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.elasticsearch.client.Response; +import org.elasticsearch.client.Request; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -49,9 +47,9 @@ public void startWatcher() throws Exception { emptyList(), emptyMap()); // create one document in this index, so we can test in the YAML tests that the index cannot be accessed - Response resp = adminClient().performRequest("PUT", "/index_not_allowed_to_read/doc/1", Collections.emptyMap(), - new StringEntity("{\"foo\":\"bar\"}", ContentType.APPLICATION_JSON)); - assertThat(resp.getStatusLine().getStatusCode(), is(201)); + Request request = new Request("PUT", "/index_not_allowed_to_read/doc/1"); + request.setJsonEntity("{\"foo\":\"bar\"}"); + adminClient().performRequest(request); assertBusy(() -> { ClientYamlTestResponse response = @@ -129,4 +127,3 @@ protected Settings restAdminSettings() { .build(); } } - diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index 1c8204aa1ec62..665b92bbc0e3f 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -5,9 +5,8 @@ */ package org.elasticsearch.smoketest; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; @@ -21,7 +20,6 @@ import org.junit.Before; import java.io.IOException; -import java.util.Collections; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; @@ -41,27 +39,28 @@ public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase { @Before public void startWatcher() throws Exception { - StringEntity entity = new StringEntity("{ \"value\" : \"15\" }", ContentType.APPLICATION_JSON); - assertOK(adminClient().performRequest("PUT", "my_test_index/doc/1", Collections.singletonMap("refresh", "true"), entity)); + Request createAllowedDoc = new Request("PUT", "/my_test_index/doc/1"); + createAllowedDoc.setJsonEntity("{ \"value\" : \"15\" }"); + createAllowedDoc.addParameter("refresh", "true"); + adminClient().performRequest(createAllowedDoc); // delete the watcher history to not clutter with entries from other tests - adminClient().performRequest("DELETE", ".watcher-history-*", Collections.emptyMap()); + adminClient().performRequest(new Request("DELETE", ".watcher-history-*")); // create one document in this index, so we can test in the YAML tests that the index cannot be accessed - Response resp = adminClient().performRequest("PUT", "/index_not_allowed_to_read/doc/1", Collections.emptyMap(), - new StringEntity("{\"foo\":\"bar\"}", ContentType.APPLICATION_JSON)); - assertThat(resp.getStatusLine().getStatusCode(), is(201)); + Request createNotAllowedDoc = new Request("PUT", "/index_not_allowed_to_read/doc/1"); + createNotAllowedDoc.setJsonEntity("{\"foo\":\"bar\"}"); +
adminClient().performRequest(createNotAllowedDoc); assertBusy(() -> { try { - Response statsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); + Response statsResponse = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats")); ObjectPath objectPath = ObjectPath.createFromResponse(statsResponse); String state = objectPath.evaluate("stats.0.watcher_state"); switch (state) { case "stopped": - Response startResponse = adminClient().performRequest("POST", "_xpack/watcher/_start"); - assertOK(startResponse); + Response startResponse = adminClient().performRequest(new Request("POST", "/_xpack/watcher/_start")); String body = EntityUtils.toString(startResponse.getEntity()); assertThat(body, containsString("\"acknowledged\":true")); break; @@ -82,18 +81,18 @@ public void startWatcher() throws Exception { assertBusy(() -> { for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { - assertOK(adminClient().performRequest("HEAD", "_template/" + template)); + assertOK(adminClient().performRequest(new Request("HEAD", "_template/" + template))); } }); } @After public void stopWatcher() throws Exception { - assertOK(adminClient().performRequest("DELETE", "my_test_index")); + adminClient().performRequest(new Request("DELETE", "/my_test_index")); assertBusy(() -> { try { - Response statsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); + Response statsResponse = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats")); ObjectPath objectPath = ObjectPath.createFromResponse(statsResponse); String state = objectPath.evaluate("stats.0.watcher_state"); @@ -106,8 +105,7 @@ public void stopWatcher() throws Exception { case "starting": throw new AssertionError("waiting until starting state reached started state to stop"); case "started": - Response stopResponse = adminClient().performRequest("POST", "_xpack/watcher/_stop", Collections.emptyMap()); - assertOK(stopResponse); + Response stopResponse = adminClient().performRequest(new Request("POST", "/_xpack/watcher/_stop")); String body = EntityUtils.toString(stopResponse.getEntity()); assertThat(body, containsString("\"acknowledged\":true")); break; @@ -210,7 +208,7 @@ public void testSearchTransformHasPermissions() throws Exception { boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); assertThat(conditionMet, is(true)); - ObjectPath getObjectPath = ObjectPath.createFromResponse(client().performRequest("GET", "my_test_index/doc/my-id")); + ObjectPath getObjectPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/my_test_index/doc/my-id"))); String value = getObjectPath.evaluate("_source.hits.hits.0._source.value"); assertThat(value, is("15")); } @@ -238,8 +236,7 @@ public void testSearchTransformInsufficientPermissions() throws Exception { getWatchHistoryEntry(watchId); - Response response = adminClient().performRequest("GET", "my_test_index/doc/some-id", - Collections.singletonMap("ignore", "404")); + Response response = adminClient().performRequest(new Request("HEAD", "/my_test_index/doc/some-id")); assertThat(response.getStatusLine().getStatusCode(), is(404)); } @@ -262,7 +259,7 @@ public void testIndexActionHasPermissions() throws Exception { boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); assertThat(conditionMet, is(true)); - ObjectPath getObjectPath = ObjectPath.createFromResponse(client().performRequest("GET", "my_test_index/doc/my-id")); + ObjectPath 
getObjectPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/my_test_index/doc/my-id"))); String spam = getObjectPath.evaluate("_source.spam"); assertThat(spam, is("eggs")); } @@ -286,16 +283,14 @@ public void testIndexActionInsufficientPrivileges() throws Exception { boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); assertThat(conditionMet, is(true)); - Response response = adminClient().performRequest("GET", "index_not_allowed_to_read/doc/my-id", - Collections.singletonMap("ignore", "404")); + Response response = adminClient().performRequest(new Request("HEAD", "/index_not_allowed_to_read/doc/my-id")); assertThat(response.getStatusLine().getStatusCode(), is(404)); } private void indexWatch(String watchId, XContentBuilder builder) throws Exception { - StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - - Response response = client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, Collections.emptyMap(), entity); - assertOK(response); + Request request = new Request("PUT", "/_xpack/watcher/watch/" + watchId); + request.setJsonEntity(Strings.toString(builder)); + Response response = client().performRequest(request); Map responseMap = entityAsMap(response); assertThat(responseMap, hasEntry("_id", watchId)); } @@ -307,7 +302,7 @@ private ObjectPath getWatchHistoryEntry(String watchId) throws Exception { private ObjectPath getWatchHistoryEntry(String watchId, String state) throws Exception { final AtomicReference objectPathReference = new AtomicReference<>(); assertBusy(() -> { - client().performRequest("POST", ".watcher-history-*/_refresh"); + client().performRequest(new Request("POST", "/.watcher-history-*/_refresh")); try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -323,8 +318,9 @@ private ObjectPath getWatchHistoryEntry(String watchId, String state) throws Exc .endObject().endArray(); builder.endObject(); - StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - Response response = client().performRequest("POST", ".watcher-history-*/_search", Collections.emptyMap(), entity); + Request searchRequest = new Request("POST", "/.watcher-history-*/_search"); + searchRequest.setJsonEntity(Strings.toString(builder)); + Response response = client().performRequest(searchRequest); ObjectPath objectPath = ObjectPath.createFromResponse(response); int totalHits = objectPath.evaluate("hits.total"); assertThat(totalHits, is(greaterThanOrEqualTo(1))); diff --git a/x-pack/qa/smoke-test-watcher/build.gradle b/x-pack/qa/smoke-test-watcher/build.gradle index dc87248df617f..5923afcacad94 100644 --- a/x-pack/qa/smoke-test-watcher/build.gradle +++ b/x-pack/qa/smoke-test-watcher/build.gradle @@ -7,7 +7,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('watcher'), configuration: 'runtime') testCompile project(path: ':modules:lang-mustache', configuration: 'runtime') testCompile project(path: ':modules:lang-painless', configuration: 'runtime') diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java index 86d97d01904fe..f56f96efc7883 
100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java @@ -5,8 +5,7 @@ */ package org.elasticsearch.smoketest; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; @@ -23,7 +22,6 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicReference; -import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -39,15 +37,15 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { @Before public void startWatcher() throws Exception { // delete the watcher history to not clutter with entries from other tests - assertOK(adminClient().performRequest("DELETE", ".watcher-history-*")); + assertOK(adminClient().performRequest(new Request("DELETE", "/.watcher-history-*"))); assertBusy(() -> { - Response response = adminClient().performRequest("GET", "_xpack/watcher/stats"); + Response response = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats")); String state = ObjectPath.createFromResponse(response).evaluate("stats.0.watcher_state"); switch (state) { case "stopped": - Response startResponse = adminClient().performRequest("POST", "/_xpack/watcher/_start"); + Response startResponse = adminClient().performRequest(new Request("POST", "/_xpack/watcher/_start")); boolean isAcknowledged = ObjectPath.createFromResponse(startResponse).evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); break; @@ -65,7 +63,7 @@ public void startWatcher() throws Exception { assertBusy(() -> { for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { - Response templateExistsResponse = adminClient().performRequest("HEAD", "_template/" + template, emptyMap()); + Response templateExistsResponse = adminClient().performRequest(new Request("HEAD", "/_template/" + template)); assertThat(templateExistsResponse.getStatusLine().getStatusCode(), is(200)); } }); @@ -74,7 +72,7 @@ public void startWatcher() throws Exception { @After public void stopWatcher() throws Exception { assertBusy(() -> { - Response response = adminClient().performRequest("GET", "_xpack/watcher/stats", emptyMap()); + Response response = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats")); String state = ObjectPath.createFromResponse(response).evaluate("stats.0.watcher_state"); switch (state) { @@ -86,7 +84,7 @@ public void stopWatcher() throws Exception { case "starting": throw new AssertionError("waiting until starting state reached started state to stop"); case "started": - Response stopResponse = adminClient().performRequest("POST", "/_xpack/watcher/_stop", emptyMap()); + Response stopResponse = adminClient().performRequest(new Request("POST", "/_xpack/watcher/_stop")); boolean isAcknowledged = ObjectPath.createFromResponse(stopResponse).evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); break; @@ -112,12 +110,12 @@ public void testMonitorClusterHealth() throws Exception { String watchId = "cluster_health_watch"; // get master publish address -
Response clusterStateResponse = adminClient().performRequest("GET", "_cluster/state"); + Response clusterStateResponse = adminClient().performRequest(new Request("GET", "/_cluster/state")); ObjectPath clusterState = ObjectPath.createFromResponse(clusterStateResponse); String masterNode = clusterState.evaluate("master_node"); assertThat(masterNode, is(notNullValue())); - Response statsResponse = adminClient().performRequest("GET", "_nodes"); + Response statsResponse = adminClient().performRequest(new Request("GET", "/_nodes")); ObjectPath stats = ObjectPath.createFromResponse(statsResponse); String address = stats.evaluate("nodes." + masterNode + ".http.publish_address"); assertThat(address, is(notNullValue())); @@ -163,16 +161,15 @@ public void testMonitorClusterHealth() throws Exception { } private void indexWatch(String watchId, XContentBuilder builder) throws Exception { - StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - - Response response = client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, emptyMap(), entity); - assertOK(response); + Request request = new Request("PUT", "/_xpack/watcher/watch/" + watchId); + request.setJsonEntity(Strings.toString(builder)); + Response response = client().performRequest(request); Map responseMap = entityAsMap(response); assertThat(responseMap, hasEntry("_id", watchId)); } private void deleteWatch(String watchId) throws IOException { - Response response = client().performRequest("DELETE", "_xpack/watcher/watch/" + watchId); + Response response = client().performRequest(new Request("DELETE", "/_xpack/watcher/watch/" + watchId)); assertOK(response); ObjectPath path = ObjectPath.createFromResponse(response); boolean found = path.evaluate("found"); @@ -182,7 +179,7 @@ private void deleteWatch(String watchId) throws IOException { private ObjectPath getWatchHistoryEntry(String watchId) throws Exception { final AtomicReference objectPathReference = new AtomicReference<>(); assertBusy(() -> { - client().performRequest("POST", ".watcher-history-*/_refresh"); + client().performRequest(new Request("POST", "/.watcher-history-*/_refresh")); try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -194,8 +191,9 @@ private ObjectPath getWatchHistoryEntry(String watchId) throws Exception { .endObject().endArray(); builder.endObject(); - StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - Response response = client().performRequest("POST", ".watcher-history-*/_search", emptyMap(), entity); + Request searchRequest = new Request("POST", "/.watcher-history-*/_search"); + searchRequest.setJsonEntity(Strings.toString(builder)); + Response response = client().performRequest(searchRequest); ObjectPath objectPath = ObjectPath.createFromResponse(response); int totalHits = objectPath.evaluate("hits.total"); assertThat(totalHits, is(greaterThanOrEqualTo(1))); @@ -208,7 +206,7 @@ private ObjectPath getWatchHistoryEntry(String watchId) throws Exception { } private void assertWatchCount(int expectedWatches) throws IOException { - Response watcherStatsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); + Response watcherStatsResponse = adminClient().performRequest(new Request("GET", "/_xpack/watcher/stats")); ObjectPath objectPath = ObjectPath.createFromResponse(watcherStatsResponse); int watchCount = objectPath.evaluate("stats.0.watch_count"); assertThat(watchCount, is(expectedWatches)); diff --git a/x-pack/qa/sql/build.gradle 
b/x-pack/qa/sql/build.gradle index 17a1d5acdc99f..baaf0451e51f2 100644 --- a/x-pack/qa/sql/build.gradle +++ b/x-pack/qa/sql/build.gradle @@ -1,4 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks import org.elasticsearch.gradle.test.RunTask description = 'Integration tests for SQL' @@ -29,8 +28,7 @@ dependenciesInfo.enabled = false // the main files are actually test files, so use the appropriate forbidden api sigs forbiddenApisMain { - signaturesURLs = [PrecommitTasks.getResource('/forbidden/es-all-signatures.txt'), - PrecommitTasks.getResource('/forbidden/es-test-signatures.txt')] + replaceSignatureFiles 'es-all-signatures', 'es-test-signatures' } thirdPartyAudit.excludes = [ diff --git a/x-pack/qa/sql/security/build.gradle b/x-pack/qa/sql/security/build.gradle index f02886f80a103..15f7734f9422e 100644 --- a/x-pack/qa/sql/security/build.gradle +++ b/x-pack/qa/sql/security/build.gradle @@ -1,5 +1,5 @@ dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } Project mainProject = project @@ -20,7 +20,7 @@ subprojects { } dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } integTestCluster { diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java index d61b4b9a946bd..4d90c9cce502b 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java @@ -13,6 +13,7 @@ import java.sql.Connection; import java.sql.ResultSet; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Locale; @@ -37,8 +38,7 @@ public static List readScriptSpec() throws Exception { tests.addAll(readScriptSpec("/agg.sql-spec", parser)); tests.addAll(readScriptSpec("/arithmetic.sql-spec", parser)); tests.addAll(readScriptSpec("/string-functions.sql-spec", parser)); - // AwaitsFix: https://github.com/elastic/elasticsearch/issues/32589 - // tests.addAll(readScriptSpec("/case-functions.sql-spec", parser)); + tests.addAll(readScriptSpec("/case-functions.sql-spec", parser)); return tests; } @@ -60,8 +60,11 @@ public SqlSpecTestCase(String fileName, String groupName, String testName, Integ @Override protected final void doTest() throws Throwable { - boolean goodLocale = !(Locale.getDefault().equals(new Locale.Builder().setLanguageTag("tr").build()) - || Locale.getDefault().equals(new Locale.Builder().setLanguageTag("tr-TR").build())); + // we skip the tests for these locales because ES-SQL is Locale-insensitive for now, + // while H2 does take the Locale into consideration + String[] h2IncompatibleLocales = new String[] {"tr", "az", "tr-TR", "tr-CY", "az-Latn", "az-Cyrl", "az-Latn-AZ", "az-Cyrl-AZ"}; + boolean goodLocale = !Arrays.stream(h2IncompatibleLocales) + .anyMatch((l) -> Locale.getDefault().equals(new Locale.Builder().setLanguageTag(l).build())); if (fileName.startsWith("case-functions")) { Assume.assumeTrue(goodLocale); } diff --git a/x-pack/qa/third-party/hipchat/build.gradle b/x-pack/qa/third-party/hipchat/build.gradle index 03b6c31969844..2b2ee7fcbbf87 100644 --- a/x-pack/qa/third-party/hipchat/build.gradle +++ b/x-pack/qa/third-party/hipchat/build.gradle @@ -4,7 +4,7 @@ apply plugin:
'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('watcher'), configuration: 'runtime') } diff --git a/x-pack/qa/third-party/jira/build.gradle b/x-pack/qa/third-party/jira/build.gradle index 3814c8e9a5382..283f9688699bd 100644 --- a/x-pack/qa/third-party/jira/build.gradle +++ b/x-pack/qa/third-party/jira/build.gradle @@ -7,7 +7,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('watcher'), configuration: 'runtime') } diff --git a/x-pack/qa/third-party/pagerduty/build.gradle b/x-pack/qa/third-party/pagerduty/build.gradle index c0f337e160e0a..12758989d0f21 100644 --- a/x-pack/qa/third-party/pagerduty/build.gradle +++ b/x-pack/qa/third-party/pagerduty/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('watcher'), configuration: 'runtime') } diff --git a/x-pack/qa/third-party/slack/build.gradle b/x-pack/qa/third-party/slack/build.gradle index 431752765f3a0..f1bcd98cff694 100644 --- a/x-pack/qa/third-party/slack/build.gradle +++ b/x-pack/qa/third-party/slack/build.gradle @@ -5,7 +5,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('watcher'), configuration: 'runtime') } diff --git a/x-pack/qa/transport-client-tests/build.gradle b/x-pack/qa/transport-client-tests/build.gradle index a94ad8fd59267..3ece6dd1147c4 100644 --- a/x-pack/qa/transport-client-tests/build.gradle +++ b/x-pack/qa/transport-client-tests/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile project(path: xpackModule('core'), configuration: 'shadow') + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') } diff --git a/x-pack/test/feature-aware/build.gradle b/x-pack/test/feature-aware/build.gradle index f6a1f6cb16f2a..11b0e67183c8f 100644 --- a/x-pack/test/feature-aware/build.gradle +++ b/x-pack/test/feature-aware/build.gradle @@ -3,7 +3,7 @@ apply plugin: 'elasticsearch.build' dependencies { compile 'org.ow2.asm:asm:6.2' compile "org.elasticsearch:elasticsearch:${version}" - compile project(path: xpackModule('core'), configuration: 'shadow') + compile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile "org.elasticsearch.test:framework:${version}" } diff --git a/x-pack/transport-client/build.gradle b/x-pack/transport-client/build.gradle index 7155dad5ee60d..a96f4146fbf67 100644 --- a/x-pack/transport-client/build.gradle +++ b/x-pack/transport-client/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.precommit.PrecommitTasks - apply plugin: 'elasticsearch.build' apply plugin: 
'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -10,7 +8,7 @@ archivesBaseName = 'x-pack-transport' dependencies { // this "api" dependency looks weird, but it is correct, as it contains // all of x-pack for now, and transport client will be going away in the future. - compile project(path: xpackModule('core'), configuration: 'shadow') + compile "org.elasticsearch.plugin:x-pack-core:${version}" compile "org.elasticsearch.client:transport:${version}" testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" @@ -22,8 +20,7 @@ dependencyLicenses.enabled = false forbiddenApisTest { // we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to // be pulled in - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), - PrecommitTasks.getResource('/forbidden/es-all-signatures.txt')] + replaceSignatureFiles 'jdk-signatures', 'es-all-signatures' } namingConventions {